HDFS-8854. Erasure coding: add ECPolicy to replace schema+cellSize in hadoop-hdfs. Contributed by Walter Su.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index fb02476..0d5bf8f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -30,12 +30,6 @@
   public static final String CODEC_NAME_KEY = "codec";
 
   /**
-   * A friendly and understandable name that can mean what's it, also serves as
-   * the identifier that distinguish it from other schemas.
-   */
-  private final String schemaName;
-
-  /**
    * The erasure codec name associated.
    */
   private final String codecName;
@@ -59,14 +53,9 @@
   /**
-   * Constructor with schema name and provided all options. Note the options may
-   * contain additional information for the erasure codec to interpret further.
-   * @param schemaName schema name
+   * Constructor with all options provided. Note the options may contain
+   * additional information for the erasure codec to interpret further.
    * @param allOptions all schema options
    */
-  public ECSchema(String schemaName, Map<String, String> allOptions) {
-    assert (schemaName != null && ! schemaName.isEmpty());
-
-    this.schemaName = schemaName;
-
+  public ECSchema(Map<String, String> allOptions) {
     if (allOptions == null || allOptions.isEmpty()) {
       throw new IllegalArgumentException("No schema options are provided");
     }
@@ -94,33 +83,27 @@
 
   /**
    * Constructor with key parameters provided.
-   * @param schemaName schema name
    * @param codecName codec name
    * @param numDataUnits number of data units used in the schema
-   * @param numParityUnits number os parity units used in the schema
+   * @param numParityUnits number of parity units used in the schema
    */
-  public ECSchema(String schemaName, String codecName,
-                  int numDataUnits, int numParityUnits) {
-    this(schemaName, codecName, numDataUnits, numParityUnits, null);
+  public ECSchema(String codecName, int numDataUnits, int numParityUnits) {
+    this(codecName, numDataUnits, numParityUnits, null);
   }
 
   /**
    * Constructor with key parameters provided. Note the extraOptions may contain
    * additional information for the erasure codec to interpret further.
-   * @param schemaName schema name
    * @param codecName codec name
    * @param numDataUnits number of data units used in the schema
-   * @param numParityUnits number os parity units used in the schema
+   * @param numParityUnits number of parity units used in the schema
    * @param extraOptions extra options to configure the codec
    */
-  public ECSchema(String schemaName, String codecName, int numDataUnits,
-                  int numParityUnits, Map<String, String> extraOptions) {
-
-    assert (schemaName != null && ! schemaName.isEmpty());
+  public ECSchema(String codecName, int numDataUnits, int numParityUnits,
+      Map<String, String> extraOptions) {
     assert (codecName != null && ! codecName.isEmpty());
     assert (numDataUnits > 0 && numParityUnits > 0);
 
-    this.schemaName = schemaName;
     this.codecName = codecName;
     this.numDataUnits = numDataUnits;
     this.numParityUnits = numParityUnits;
@@ -154,14 +137,6 @@
   }
 
   /**
-   * Get the schema name
-   * @return schema name
-   */
-  public String getSchemaName() {
-    return schemaName;
-  }
-
-  /**
    * Get the codec name
    * @return codec name
    */
@@ -201,7 +176,6 @@
   public String toString() {
     StringBuilder sb = new StringBuilder("ECSchema=[");
 
-    sb.append("Name=" + schemaName + ", ");
     sb.append("Codec=" + codecName + ", ");
     sb.append(NUM_DATA_UNITS_KEY + "=" + numDataUnits + ", ");
     sb.append(NUM_PARITY_UNITS_KEY + "=" + numParityUnits);
@@ -235,9 +209,6 @@
     if (numParityUnits != ecSchema.numParityUnits) {
       return false;
     }
-    if (!schemaName.equals(ecSchema.schemaName)) {
-      return false;
-    }
     if (!codecName.equals(ecSchema.codecName)) {
       return false;
     }
@@ -246,8 +217,7 @@
 
   @Override
   public int hashCode() {
-    int result = schemaName.hashCode();
-    result = 31 * result + codecName.hashCode();
+    int result = codecName.hashCode();
     result = 31 * result + extraOptions.hashCode();
     result = 31 * result + numDataUnits;
     result = 31 * result + numParityUnits;
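
The net effect in ECSchema: a schema is now identified only by its codec name and unit counts, with the human-readable name moving to the new ErasureCodingPolicy class introduced later in this patch. A minimal sketch of the updated three-argument constructor (the wrapper class is hypothetical; the "rs" codec and 6+3 layout come from the tests and defaults elsewhere in this patch):

    import org.apache.hadoop.io.erasurecode.ECSchema;

    public class ECSchemaExample {
      public static void main(String[] args) {
        // 6 data units + 3 parity units; the old schemaName argument is gone.
        ECSchema schema = new ECSchema("rs", 6, 3);
        // toString() now reports only the codec and unit counts, no name.
        System.out.println(schema);
      }
    }
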
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
deleted file mode 100644
index fce46f8..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.io.erasurecode;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-import org.w3c.dom.Text;
-import org.xml.sax.SAXException;
-
-/**
- * A EC schema loading utility that loads predefined EC schemas from XML file
- */
-public class SchemaLoader {
-  private static final Logger LOG = LoggerFactory.getLogger(
-      SchemaLoader.class.getName());
-
-  /**
-   * Load predefined ec schemas from configuration file. This file is
-   * expected to be in the XML format.
-   */
-  public List<ECSchema> loadSchema(String schemaFilePath) {
-    File confFile = getSchemaFile(schemaFilePath);
-    if (confFile == null) {
-      LOG.warn("Not found any predefined EC schema file");
-      return Collections.emptyList();
-    }
-
-    try {
-      return loadSchema(confFile);
-    } catch (ParserConfigurationException e) {
-      throw new RuntimeException("Failed to load schema file: " + confFile);
-    } catch (IOException e) {
-      throw new RuntimeException("Failed to load schema file: " + confFile);
-    } catch (SAXException e) {
-      throw new RuntimeException("Failed to load schema file: " + confFile);
-    }
-  }
-
-  private List<ECSchema> loadSchema(File schemaFile)
-      throws ParserConfigurationException, IOException, SAXException {
-
-    LOG.info("Loading predefined EC schema file {}", schemaFile);
-
-    // Read and parse the schema file.
-    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
-    dbf.setIgnoringComments(true);
-    DocumentBuilder builder = dbf.newDocumentBuilder();
-    Document doc = builder.parse(schemaFile);
-    Element root = doc.getDocumentElement();
-
-    if (!"schemas".equals(root.getTagName())) {
-      throw new RuntimeException("Bad EC schema config file: " +
-          "top-level element not <schemas>");
-    }
-
-    NodeList elements = root.getChildNodes();
-    List<ECSchema> schemas = new ArrayList<ECSchema>();
-    for (int i = 0; i < elements.getLength(); i++) {
-      Node node = elements.item(i);
-      if (node instanceof Element) {
-        Element element = (Element) node;
-        if ("schema".equals(element.getTagName())) {
-          ECSchema schema = loadSchema(element);
-            schemas.add(schema);
-        } else {
-          LOG.warn("Bad element in EC schema configuration file: {}",
-              element.getTagName());
-        }
-      }
-    }
-
-    return schemas;
-  }
-
-  /**
-   * Path to the XML file containing predefined ec schemas. If the path is
-   * relative, it is searched for in the classpath.
-   */
-  private File getSchemaFile(String schemaFilePath) {
-    File schemaFile = new File(schemaFilePath);
-    if (! schemaFile.isAbsolute()) {
-      URL url = Thread.currentThread().getContextClassLoader()
-          .getResource(schemaFilePath);
-      if (url == null) {
-        LOG.warn("{} not found on the classpath.", schemaFilePath);
-        schemaFile = null;
-      } else if (! url.getProtocol().equalsIgnoreCase("file")) {
-        throw new RuntimeException(
-            "EC predefined schema file " + url +
-                " found on the classpath is not on the local filesystem.");
-      } else {
-        schemaFile = new File(url.getPath());
-      }
-    }
-
-    return schemaFile;
-  }
-
-  /**
-   * Loads a schema from a schema element in the configuration file
-   */
-  private ECSchema loadSchema(Element element) {
-    String schemaName = element.getAttribute("name");
-    Map<String, String> ecOptions = new HashMap<String, String>();
-    NodeList fields = element.getChildNodes();
-
-    for (int i = 0; i < fields.getLength(); i++) {
-      Node fieldNode = fields.item(i);
-      if (fieldNode instanceof Element) {
-        Element field = (Element) fieldNode;
-        String tagName = field.getTagName();
-        String value = ((Text) field.getFirstChild()).getData().trim();
-        ecOptions.put(tagName, value);
-      }
-    }
-
-    ECSchema schema = new ECSchema(schemaName, ecOptions);
-    return schema;
-  }
-}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java
index c362b96..1d39901 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java
@@ -26,7 +26,6 @@
 
   @Test
   public void testGoodSchema() {
-    String schemaName = "goodSchema";
     int numDataUnits = 6;
     int numParityUnits = 3;
     String codec = "rs";
@@ -39,10 +38,9 @@
     options.put(ECSchema.CODEC_NAME_KEY, codec);
     options.put(extraOption, extraOptionValue);
 
-    ECSchema schema = new ECSchema(schemaName, options);
+    ECSchema schema = new ECSchema(options);
     System.out.println(schema.toString());
-    
-    assertEquals(schemaName, schema.getSchemaName());
+
     assertEquals(numDataUnits, schema.getNumDataUnits());
     assertEquals(numParityUnits, schema.getNumParityUnits());
     assertEquals(codec, schema.getCodecName());
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestSchemaLoader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestSchemaLoader.java
deleted file mode 100644
index 50d2091..0000000
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestSchemaLoader.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.io.erasurecode;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.File;
-import java.io.FileWriter;
-import java.io.PrintWriter;
-import java.util.List;
-
-import org.junit.Test;
-
-public class TestSchemaLoader {
-
-  final static String TEST_DIR = new File(System.getProperty(
-      "test.build.data", "/tmp")).getAbsolutePath();
-
-  final static String SCHEMA_FILE = new File(TEST_DIR, "test-ecschema")
-      .getAbsolutePath();
-
-  @Test
-  public void testLoadSchema() throws Exception {
-    PrintWriter out = new PrintWriter(new FileWriter(SCHEMA_FILE));
-    out.println("<?xml version=\"1.0\"?>");
-    out.println("<schemas>");
-    out.println("  <schema name=\"RSk6m3\">");
-    out.println("    <numDataUnits>6</numDataUnits>");
-    out.println("    <numParityUnits>3</numParityUnits>");
-    out.println("    <codec>RS</codec>");
-    out.println("  </schema>");
-    out.println("  <schema name=\"RSk10m4\">");
-    out.println("    <numDataUnits>10</numDataUnits>");
-    out.println("    <numParityUnits>4</numParityUnits>");
-    out.println("    <codec>RS</codec>");
-    out.println("  </schema>");
-    out.println("</schemas>");
-    out.close();
-
-    SchemaLoader schemaLoader = new SchemaLoader();
-    List<ECSchema> schemas = schemaLoader.loadSchema(SCHEMA_FILE);
-
-    assertEquals(2, schemas.size());
-
-    ECSchema schema1 = schemas.get(0);
-    assertEquals("RSk6m3", schema1.getSchemaName());
-    assertEquals(0, schema1.getExtraOptions().size());
-    assertEquals(6, schema1.getNumDataUnits());
-    assertEquals(3, schema1.getNumParityUnits());
-    assertEquals("RS", schema1.getCodecName());
-
-    ECSchema schema2 = schemas.get(1);
-    assertEquals("RSk10m4", schema2.getSchemaName());
-    assertEquals(0, schema2.getExtraOptions().size());
-    assertEquals(10, schema2.getNumDataUnits());
-    assertEquals(4, schema2.getNumParityUnits());
-    assertEquals("RS", schema2.getCodecName());
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 214e15d..00191e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -185,8 +185,8 @@
 
     String  THREADPOOL_SIZE_KEY = PREFIX + "threadpool.size";
     /**
-     * With default 6+3 schema, each normal read could span 6 DNs. So this
-     * default value accommodates 3 read streams
+     * With the default RS-6-3-64k erasure coding policy, each normal read could
+     * span 6 DNs, so this default value accommodates 3 read streams.
      */
     int     THREADPOOL_SIZE_DEFAULT = 18;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index a82e138..279557f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -45,7 +45,6 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.retry.AtMostOnce;
 import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.security.KerberosInfo;
@@ -1486,21 +1485,20 @@
   EventBatchList getEditsFromTxid(long txid) throws IOException;
 
   /**
-   * Create an erasure coding zone with specified schema, if any, otherwise
-   * default
+   * Create an erasure coding zone with the specified policy, falling back to
+   * the default policy if none is given
    */
   @AtMostOnce
-  public void createErasureCodingZone(String src, ECSchema schema, int cellSize)
+  public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
       throws IOException;
 
   /**
-   * Gets list of ECSchemas loaded in Namenode
+   * Get the erasure coding policies loaded in the Namenode
    *
-   * @return Returns the list of ECSchemas loaded at Namenode
    * @throws IOException
    */
   @Idempotent
-  public ECSchema[] getECSchemas() throws IOException;
+  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException;
 
   /**
    * Get the information about the EC zone for the path
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
new file mode 100644
index 0000000..e5dfdff
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.io.erasurecode.ECSchema;
+
+import java.util.Map;
+
+/**
+ * A policy describing how to write, read, and encode an erasure-coded file.
+ */
+public final class ErasureCodingPolicy {
+
+  private final String name;
+  private final ECSchema schema;
+  private final int cellSize;
+
+  public ErasureCodingPolicy(String name, ECSchema schema, int cellSize) {
+    this.name = name;
+    this.schema = schema;
+    this.cellSize = cellSize;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public ECSchema getSchema() {
+    return schema;
+  }
+
+  public int getCellSize() {
+    return cellSize;
+  }
+
+  public int getNumDataUnits() {
+    return schema.getNumDataUnits();
+  }
+
+  public int getNumParityUnits() {
+    return schema.getNumParityUnits();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    ErasureCodingPolicy that = (ErasureCodingPolicy) o;
+
+    if (that.getName().equals(name) && that.getCellSize() == cellSize
+        && that.getSchema().equals(schema)) {
+      return true;
+    }
+    return false;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = name.hashCode();
+    result = 31 * result + schema.hashCode();
+    result = 31 * result + cellSize;
+    return result;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ErasureCodingPolicy=[");
+    sb.append("Name=" + name + ", ");
+    sb.append("Schema=[" + schema.toString() + "], ");
+    sb.append("CellSize=" + cellSize + " ");
+    sb.append("]");
+    return sb.toString();
+  }
+}
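
A usage sketch of the new class, modeling the RS-6-3-64k default named elsewhere in this patch; the literals and wrapper class are illustrative, not the committed definition of the built-in policy:

    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.io.erasurecode.ECSchema;

    public class ErasureCodingPolicyExample {
      public static void main(String[] args) {
        ECSchema rs63 = new ECSchema("rs", 6, 3);
        // Assumed name and 64 KB cell size, per the RS-6-3-64k default.
        ErasureCodingPolicy policy =
            new ErasureCodingPolicy("RS-6-3-64k", rs63, 64 * 1024);
        // The convenience accessors delegate to the wrapped schema.
        System.out.println(policy.getNumDataUnits() + "+"
            + policy.getNumParityUnits() + " cells of "
            + policy.getCellSize() + " bytes");
      }
    }
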
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZone.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZone.java
index 655def3..533b630 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZone.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZone.java
@@ -16,21 +16,17 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
-import org.apache.hadoop.io.erasurecode.ECSchema;
-
 /**
  * Information about the EC Zone at the specified path.
  */
 public class ErasureCodingZone {
 
   private String dir;
-  private ECSchema schema;
-  private int cellSize;
+  private ErasureCodingPolicy ecPolicy;
 
-  public ErasureCodingZone(String dir, ECSchema schema, int cellSize) {
+  public ErasureCodingZone(String dir, ErasureCodingPolicy ecPolicy) {
     this.dir = dir;
-    this.schema = schema;
-    this.cellSize = cellSize;
+    this.ecPolicy = ecPolicy;
   }
 
   /**
@@ -43,24 +39,16 @@
   }
 
   /**
-   * Get the schema for the EC Zone
+   * Get the erasure coding policy for the EC Zone
    * 
-   * @return
+   * @return the erasure coding policy
    */
-  public ECSchema getSchema() {
-    return schema;
-  }
-
-  /**
-   * Get cellSize for the EC Zone
-   */
-  public int getCellSize() {
-    return cellSize;
+  public ErasureCodingPolicy getErasureCodingPolicy() {
+    return ecPolicy;
   }
 
   @Override
   public String toString() {
-    return "Dir: " + getDir() + ", Schema: " + schema + ", cellSize: "
-        + cellSize;
+    return "Dir: " + getDir() + ", Policy: " + ecPolicy;
   }
 }
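
With the zone wrapping a single policy object, callers that previously read the schema and cell size separately now fetch both through the policy. A hypothetical helper:

    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;

    public class EcZoneInfo {
      // Hypothetical inspection of a zone returned by the NameNode.
      static String describe(ErasureCodingZone zone) {
        ErasureCodingPolicy p = zone.getErasureCodingPolicy();
        // Cell size used to be a field on the zone; it now rides on the policy.
        return zone.getDir() + " uses " + p.getName()
            + ", cell size " + p.getCellSize();
      }
    }
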
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index fa816e4..3d19ab9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -80,8 +80,8 @@
 
   /*
    * These values correspond to the values used by the system default erasure
-   * coding schema.
-   * TODO: to be removed once all places use schema.
+   * coding policy.
+   * TODO: get these values from the EC policy of the associated INodeFile
    */
 
   public static final byte NUM_DATA_BLOCKS = 6;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index 8c902b4..6e05ce0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@ -26,7 +26,6 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 
 /** Interface that represents the over the wire information for a file.
  */
@@ -49,8 +48,7 @@
 
   private final FileEncryptionInfo feInfo;
 
-  private final ECSchema ecSchema;
-  private final int stripeCellSize;
+  private final ErasureCodingPolicy ecPolicy;
   
   // Used by dir, not including dot and dotdot. Always zero for a regular file.
   private final int childrenNum;
@@ -77,7 +75,7 @@
       long blocksize, long modification_time, long access_time,
       FsPermission permission, String owner, String group, byte[] symlink,
       byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo,
-      byte storagePolicy, ECSchema ecSchema, int stripeCellSize) {
+      byte storagePolicy, ErasureCodingPolicy ecPolicy) {
     this.length = length;
     this.isdir = isdir;
     this.block_replication = (short)block_replication;
@@ -97,8 +95,7 @@
     this.childrenNum = childrenNum;
     this.feInfo = feInfo;
     this.storagePolicy = storagePolicy;
-    this.ecSchema = ecSchema;
-    this.stripeCellSize = stripeCellSize;
+    this.ecPolicy = ecPolicy;
   }
 
   /**
@@ -256,12 +253,8 @@
     return feInfo;
   }
 
-  public ECSchema getECSchema() {
-    return ecSchema;
-  }
-
-  public int getStripeCellSize() {
-    return stripeCellSize;
+  public ErasureCodingPolicy getErasureCodingPolicy() {
+    return ecPolicy;
   }
 
   public final int getChildrenNum() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
index 735e7b2..6e01bbe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
@@ -24,7 +24,6 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 
 /**
  * Collection of blocks with their locations and the file length.
@@ -38,8 +37,7 @@
   private final LocatedBlock lastLocatedBlock;
   private final boolean isLastBlockComplete;
   private final FileEncryptionInfo fileEncryptionInfo;
-  private final ECSchema ecSchema;
-  private final int stripeCellSize;
+  private final ErasureCodingPolicy ecPolicy;
 
   public LocatedBlocks() {
     fileLength = 0;
@@ -48,22 +46,20 @@
     lastLocatedBlock = null;
     isLastBlockComplete = false;
     fileEncryptionInfo = null;
-    ecSchema = null;
-    stripeCellSize = 0;
+    ecPolicy = null;
   }
 
-  public LocatedBlocks(long flength, boolean isUnderConstuction,
+  public LocatedBlocks(long flength, boolean isUnderConstruction,
       List<LocatedBlock> blks, LocatedBlock lastBlock,
       boolean isLastBlockCompleted, FileEncryptionInfo feInfo,
-      ECSchema ecSchema, int stripeCellSize) {
+      ErasureCodingPolicy ecPolicy) {
     fileLength = flength;
     blocks = blks;
-    underConstruction = isUnderConstuction;
+    underConstruction = isUnderConstruction;
     this.lastLocatedBlock = lastBlock;
     this.isLastBlockComplete = isLastBlockCompleted;
     this.fileEncryptionInfo = feInfo;
-    this.ecSchema = ecSchema;
-    this.stripeCellSize = stripeCellSize;
+    this.ecPolicy = ecPolicy;
   }
 
   /**
@@ -120,17 +116,10 @@
   }
 
   /**
-   * @return The ECSchema for ErasureCoded file, null otherwise.
+   * @return The ErasureCodingPolicy for an erasure coded file, null otherwise.
    */
-  public ECSchema getECSchema() {
-    return ecSchema;
-  }
-
-  /**
-   * @return Stripe Cell size for ErasureCoded file, 0 otherwise.
-   */
-  public int getStripeCellSize() {
-    return stripeCellSize;
+  public ErasureCodingPolicy getErasureCodingPolicy() {
+    return ecPolicy;
   }
 
   /**
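
Client code keys off the nullability of the returned policy to choose between the striped and contiguous read paths, as the DFSClient.open() change later in this patch does. A condensed sketch:

    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

    public class StripedCheck {
      // A non-null policy marks a striped (erasure-coded) file;
      // replicated files report null here.
      static boolean isStriped(LocatedBlocks blocks) {
        ErasureCodingPolicy ecPolicy = blocks.getErasureCodingPolicy();
        return ecPolicy != null;
      }
    }
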
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
index a6c7b10..813ea26 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
@@ -61,7 +61,7 @@
       int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
     this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
         access_time, permission, owner, group, null, localName, inodeId,
-        childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null, 0);
+        childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
     this.snapshotNumber = snapshotNumber;
     this.snapshotQuota = snapshotQuota;
     this.parentFullPath = parentFullPath;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index eeadd73..9ebf010 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -132,7 +132,7 @@
         blockSize, mTime, aTime, permission, owner, group,
         symlink, DFSUtilClient.string2Bytes(localName),
         fileId, childrenNum, null,
-        storagePolicy, null, 0);
+        storagePolicy, null);
   }
 
   /** Convert a Json map to an ExtendedBlock object. */
@@ -503,7 +503,7 @@
         (Map<?, ?>) m.get("lastLocatedBlock"));
     final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete");
     return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks,
-        lastLocatedBlock, isLastBlockComplete, null, null, 0);
+        lastLocatedBlock, isLastBlockComplete, null, null);
   }
 
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index 62db8ea..fb10e9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -873,8 +873,8 @@
       returns(GetCurrentEditLogTxidResponseProto);
   rpc getEditsFromTxid(GetEditsFromTxidRequestProto)
       returns(GetEditsFromTxidResponseProto);
-  rpc getECSchemas(GetECSchemasRequestProto)
-      returns(GetECSchemasResponseProto);
+  rpc getErasureCodingPolicies(GetErasureCodingPoliciesRequestProto)
+      returns(GetErasureCodingPoliciesResponseProto);
   rpc getErasureCodingZone(GetErasureCodingZoneRequestProto)
       returns(GetErasureCodingZoneResponseProto);
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
index 56bb7a2..d27f782 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
@@ -28,24 +28,22 @@
  */
 message ErasureCodingZoneProto {
   required string dir = 1;
-  required ECSchemaProto schema = 2;
-  required uint32 cellSize = 3;
+  required ErasureCodingPolicyProto ecPolicy = 2;
 }
 
 message CreateErasureCodingZoneRequestProto {
   required string src = 1;
-  optional ECSchemaProto schema = 2;
-  optional uint32 cellSize = 3;
+  optional ErasureCodingPolicyProto ecPolicy = 2;
 }
 
 message CreateErasureCodingZoneResponseProto {
 }
 
-message GetECSchemasRequestProto { // void request
+message GetErasureCodingPoliciesRequestProto { // void request
 }
 
-message GetECSchemasResponseProto {
-  repeated ECSchemaProto schemas = 1;
+message GetErasureCodingPoliciesResponseProto {
+  repeated ErasureCodingPolicyProto ecPolicies = 1;
 }
 
 message GetErasureCodingZoneRequestProto {
@@ -66,6 +64,5 @@
   required StorageUuidsProto targetStorageUuids = 4;
   required StorageTypesProto targetStorageTypes = 5;
   repeated uint32 liveBlockIndices = 6;
-  required ECSchemaProto ecSchema = 7;
-  required uint32 cellSize = 8;
-}
\ No newline at end of file
+  required ErasureCodingPolicyProto ecPolicy = 7;
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
index d2cb665..63fe90c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
@@ -306,8 +306,7 @@
   optional FileEncryptionInfoProto fileEncryptionInfo = 6;
 
   // Optional field for erasure coding
-  optional ECSchemaProto eCSchema = 7;
-  optional uint32 stripeCellSize = 8;
+  optional ErasureCodingPolicyProto ecPolicy = 7;
 }
 
 /**
@@ -322,11 +321,16 @@
  * ECSchema for erasurecoding
  */
 message ECSchemaProto {
-  required string schemaName = 1;
-  required string codecName = 2;
-  required uint32 dataUnits = 3;
-  required uint32 parityUnits = 4;
-  repeated ECSchemaOptionEntryProto options = 5;
+  required string codecName = 1;
+  required uint32 dataUnits = 2;
+  required uint32 parityUnits = 3;
+  repeated ECSchemaOptionEntryProto options = 4;
+}
+
+message ErasureCodingPolicyProto {
+  required string name = 1;
+  required ECSchemaProto schema = 2;
+  required uint32 cellSize = 3;
 }
 
 /**
@@ -365,8 +369,7 @@
   optional uint32 storagePolicy = 16 [default = 0]; // block storage policy id
 
   // Optional field for erasure coding
-  optional ECSchemaProto ecSchema = 17;
-  optional uint32 stripeCellSize = 18;
+  optional ErasureCodingPolicyProto ecPolicy = 17;
 }
 
 /**
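
PBHelper.convertErasureCodingPolicy is called in both directions by the translators below, but its body falls outside this excerpt. Given the message shape above, the proto-to-Java direction plausibly looks like this (a sketch under that assumption, not the committed implementation):

    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
    import org.apache.hadoop.hdfs.protocolPB.PBHelper;

    public class EcPolicyPBSketch {
      // Rebuild the Java-side policy from its wire form.
      static ErasureCodingPolicy convert(ErasureCodingPolicyProto proto) {
        return new ErasureCodingPolicy(proto.getName(),
            PBHelper.convertECSchema(proto.getSchema()), // schema converter predates this patch
            proto.getCellSize());
      }
    }
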
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 45afd2c..173bf9b 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -394,3 +394,6 @@
 
     HDFS-8827. Erasure Coding: Fix NPE when NameNode processes over-replicated
     striped blocks. (Walter Su and Takuya Fukudome via jing9)
+
+    HDFS-8854. Erasure coding: add ECPolicy to replace schema+cellSize in 
+    hadoop-hdfs. (Walter Su via zhz)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 8bf1444..2a1d219 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -164,7 +164,7 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.retry.LossyRetryInvocationHandler;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -1176,10 +1176,10 @@
     try {
       LocatedBlocks locatedBlocks = getLocatedBlocks(src, 0);
       if (locatedBlocks != null) {
-        ECSchema schema = locatedBlocks.getECSchema();
-        if (schema != null) {
-          return new DFSStripedInputStream(this, src, verifyChecksum, schema,
-              locatedBlocks.getStripeCellSize(), locatedBlocks);
+        ErasureCodingPolicy ecPolicy = locatedBlocks.getErasureCodingPolicy();
+        if (ecPolicy != null) {
+          return new DFSStripedInputStream(this, src, verifyChecksum, ecPolicy,
+              locatedBlocks);
         }
         return new DFSInputStream(this, src, verifyChecksum, locatedBlocks);
       } else {
@@ -3012,12 +3012,12 @@
     return new EncryptionZoneIterator(namenode, traceSampler);
   }
 
-  public void createErasureCodingZone(String src, ECSchema schema, int cellSize)
+  public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
       throws IOException {
     checkOpen();
     TraceScope scope = getPathTraceScope("createErasureCodingZone", src);
     try {
-      namenode.createErasureCodingZone(src, schema, cellSize);
+      namenode.createErasureCodingZone(src, ecPolicy);
     } catch (RemoteException re) {
       throw re.unwrapRemoteException(AccessControlException.class,
           SafeModeException.class,
@@ -3139,11 +3139,11 @@
     }
   }
 
-  public ECSchema[] getECSchemas() throws IOException {
+  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("getECSchemas", traceSampler);
+    TraceScope scope = Trace.startSpan("getErasureCodingPolicies", traceSampler);
     try {
-      return namenode.getECSchemas();
+      return namenode.getErasureCodingPolicies();
     } finally {
       scope.close();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index d7b292e..57f9be0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -691,12 +691,6 @@
   public static final boolean DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_DEFAULT =
       false;
 
-  public static final String DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_SIZE =
-      "dfs.client.striped.read.threadpool.size";
-  // With default 3+2 schema, each normal read could span 3 DNs. So this
-  // default value accommodates 6 read streams
-  public static final int DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE = 18;
-
   // Slow io warning log threshold settings for dfsclient and datanode.
   public static final String DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY =
     "dfs.datanode.slow.io.warning.threshold.ms";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 088f6dd..fdf6fa6 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -270,7 +270,7 @@
       }
       Preconditions.checkNotNull(stat, "HdfsFileStatus should not be null!");
       final DFSOutputStream out;
-      if(stat.getECSchema() != null) {
+      if (stat.getErasureCodingPolicy() != null) {
         out = new DFSStripedOutputStream(dfsClient, src, stat,
             flag, progress, checksum, favoredNodes);
       } else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 3612063..2ad63b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -36,7 +36,7 @@
 
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
 import org.apache.hadoop.util.DirectBufferPool;
@@ -147,7 +147,7 @@
   /** the buffer for a complete stripe */
   private ByteBuffer curStripeBuf;
   private ByteBuffer parityBuf;
-  private final ECSchema schema;
+  private final ErasureCodingPolicy ecPolicy;
   private final RawErasureDecoder decoder;
 
   /**
@@ -158,15 +158,15 @@
   private final CompletionService<Void> readingService;
 
   DFSStripedInputStream(DFSClient dfsClient, String src,
-      boolean verifyChecksum, ECSchema schema, int cellSize,
+      boolean verifyChecksum, ErasureCodingPolicy ecPolicy,
       LocatedBlocks locatedBlocks) throws IOException {
     super(dfsClient, src, verifyChecksum, locatedBlocks);
 
-    assert schema != null;
-    this.schema = schema;
-    this.cellSize = cellSize;
-    dataBlkNum = (short) schema.getNumDataUnits();
-    parityBlkNum = (short) schema.getNumParityUnits();
+    assert ecPolicy != null;
+    this.ecPolicy = ecPolicy;
+    this.cellSize = ecPolicy.getCellSize();
+    dataBlkNum = (short) ecPolicy.getNumDataUnits();
+    parityBlkNum = (short) ecPolicy.getNumParityUnits();
     groupSize = dataBlkNum + parityBlkNum;
     blockReaders = new BlockReaderInfo[groupSize];
     curStripeRange = new StripeRange(0, 0);
@@ -282,7 +282,7 @@
         stripeLimit - stripeBufOffset);
 
     LocatedStripedBlock blockGroup = (LocatedStripedBlock) currentLocatedBlock;
-    AlignedStripe[] stripes = StripedBlockUtil.divideOneStripe(schema, cellSize,
+    AlignedStripe[] stripes = StripedBlockUtil.divideOneStripe(ecPolicy, cellSize,
         blockGroup, offsetInBlockGroup,
         offsetInBlockGroup + stripeRange.length - 1, curStripeBuf);
     final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(
@@ -510,7 +510,7 @@
     LocatedStripedBlock blockGroup = getBlockGroupAt(block.getStartOffset());
 
     AlignedStripe[] stripes = StripedBlockUtil.divideByteRangeIntoStripes(
-        schema, cellSize, blockGroup, start, end, buf, offset);
+        ecPolicy, cellSize, blockGroup, start, end, buf, offset);
     CompletionService<Void> readService = new ExecutorCompletionService<>(
         dfsClient.getStripedReadsThreadPool());
     final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 746b791..4ca8fe6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -38,7 +38,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
@@ -276,10 +276,10 @@
       LOG.debug("Creating DFSStripedOutputStream for " + src);
     }
 
-    final ECSchema schema = stat.getECSchema();
-    final int numParityBlocks = schema.getNumParityUnits();
-    cellSize = stat.getStripeCellSize();
-    numDataBlocks = schema.getNumDataUnits();
+    final ErasureCodingPolicy ecPolicy = stat.getErasureCodingPolicy();
+    final int numParityBlocks = ecPolicy.getNumParityUnits();
+    cellSize = ecPolicy.getCellSize();
+    numDataBlocks = ecPolicy.getNumDataUnits();
     numAllBlocks = numDataBlocks + numParityBlocks;
 
     encoder = CodecUtil.createRSRawEncoder(dfsClient.getConfiguration(),
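
For the default RS-6-3-64k policy this yields 9 streamers per block group and a full stripe of 6 x 64 KB data cells plus 3 x 64 KB parity cells. A quick arithmetic check deriving everything from the policy object (literals assumed from the defaults named in this patch):

    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.io.erasurecode.ECSchema;

    public class StripeMath {
      public static void main(String[] args) {
        ErasureCodingPolicy p = new ErasureCodingPolicy(
            "RS-6-3-64k", new ECSchema("rs", 6, 3), 64 * 1024);
        int numAllBlocks = p.getNumDataUnits() + p.getNumParityUnits(); // 9
        long stripeDataBytes = (long) p.getNumDataUnits() * p.getCellSize();
        System.out.println(numAllBlocks + " blocks per group, "
            + stripeDataBytes + " data bytes per full stripe"); // 393216
      }
    }
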
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 4c9f9cb..f8cca02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -90,7 +90,7 @@
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
@@ -2305,18 +2305,17 @@
    * Create the erasurecoding zone
    * 
    * @param path Directory to create the ec zone
-   * @param schema ECSchema for the zone. If not specified default will be used.
-   * @param cellSize Cellsize for the striped erasure coding
+   * @param ecPolicy erasure coding policy for the zone. If null, the default will be used.
    * @throws IOException
    */
-  public void createErasureCodingZone(final Path path, final ECSchema schema,
-      final int cellSize) throws IOException {
+  public void createErasureCodingZone(final Path path, final ErasureCodingPolicy ecPolicy)
+      throws IOException {
     Path absF = fixRelativePart(path);
     new FileSystemLinkResolver<Void>() {
       @Override
       public Void doCall(final Path p) throws IOException,
           UnresolvedLinkException {
-        dfs.createErasureCodingZone(getPathName(p), schema, cellSize);
+        dfs.createErasureCodingZone(getPathName(p), ecPolicy);
         return null;
       }
 
@@ -2324,7 +2323,7 @@
       public Void next(final FileSystem fs, final Path p) throws IOException {
         if (fs instanceof DistributedFileSystem) {
           DistributedFileSystem myDfs = (DistributedFileSystem) fs;
-          myDfs.createErasureCodingZone(p, schema, cellSize);
+          myDfs.createErasureCodingZone(p, ecPolicy);
           return null;
         }
         throw new UnsupportedOperationException(
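
A caller-side sketch of the updated one-argument API; per the Javadoc above, passing null keeps the server default (the path and configuration are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class CreateEcZoneExample {
      public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new Configuration());
        // One argument replaces the old (schema, cellSize) pair;
        // null selects the default erasure coding policy.
        dfs.createErasureCodingZone(new Path("/ec"), null);
      }
    }
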
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
index 5a3c885b..e6e67cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -41,7 +41,7 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 /**
  * The public API for performing administrative functions on HDFS. Those writing
@@ -369,17 +369,13 @@
   /**
    * Create the ErasureCoding zone
    *
-   * @param path
-   *          Directory to create the ErasureCoding zone
-   * @param schema
-   *          ECSchema for the zone. If not specified default will be used.
-   * @param cellSize
-   *          Cellsize for the striped ErasureCoding
+   * @param path Directory to create the ErasureCoding zone
+   * @param ecPolicy erasure coding policy for the zone. If null, the default will be used.
    * @throws IOException
    */
-  public void createErasureCodingZone(final Path path, final ECSchema schema,
-      final int cellSize) throws IOException {
-    dfs.createErasureCodingZone(path, schema, cellSize);
+  public void createErasureCodingZone(final Path path,
+      final ErasureCodingPolicy ecPolicy) throws IOException {
+    dfs.createErasureCodingZone(path, ecPolicy);
   }
 
   /**
@@ -395,12 +391,11 @@
   }
 
   /**
-   * Get the ErasureCoding schemas supported.
+   * Get the ErasureCoding policies supported.
    *
-   * @return ECSchemas
    * @throws IOException
    */
-  public ECSchema[] getECSchemas() throws IOException {
-    return dfs.getClient().getECSchemas();
+  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
+    return dfs.getClient().getErasureCodingPolicies();
   }
 }
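
Listing the supported policies through the renamed admin call might look like this (a sketch; the NameNode URI is a placeholder):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class ListEcPolicies {
      public static void main(String[] args) throws Exception {
        HdfsAdmin admin = new HdfsAdmin(
            URI.create("hdfs://localhost:8020"), new Configuration());
        for (ErasureCodingPolicy p : admin.getErasureCodingPolicies()) {
          System.out.println(p);
        }
      }
    }
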
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
index 4701538..2121dcf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
@@ -26,7 +26,6 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 
 /** 
  * Interface that represents the over the wire information
@@ -60,10 +59,10 @@
       long access_time, FsPermission permission, String owner, String group,
       byte[] symlink, byte[] path, long fileId, LocatedBlocks locations,
       int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy,
-      ECSchema schema, int stripeCellSize) {
+      ErasureCodingPolicy ecPolicy) {
     super(length, isdir, block_replication, blocksize, modification_time,
         access_time, permission, owner, group, symlink, path, fileId,
-        childrenNum, feInfo, storagePolicy, schema, stripeCellSize);
+        childrenNum, feInfo, storagePolicy, ecPolicy);
     this.locations = locations;
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 7295f8c..c46081b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -201,8 +201,9 @@
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto;
@@ -223,7 +224,7 @@
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
 import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
 import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
@@ -1406,10 +1407,9 @@
       RpcController controller, CreateErasureCodingZoneRequestProto req)
       throws ServiceException {
     try {
-      ECSchema schema = req.hasSchema() ? PBHelper.convertECSchema(req
-          .getSchema()) : null;
-      int cellSize = req.hasCellSize() ? req.getCellSize() : 0;
-      server.createErasureCodingZone(req.getSrc(), schema, cellSize);
+      ErasureCodingPolicy ecPolicy = req.hasEcPolicy() ? PBHelper.convertErasureCodingPolicy(req
+          .getEcPolicy()) : null;
+      server.createErasureCodingZone(req.getSrc(), ecPolicy);
       return CreateErasureCodingZoneResponseProto.newBuilder().build();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -1539,14 +1539,14 @@
   }
 
   @Override
-  public GetECSchemasResponseProto getECSchemas(RpcController controller,
-      GetECSchemasRequestProto request) throws ServiceException {
+  public GetErasureCodingPoliciesResponseProto getErasureCodingPolicies(RpcController controller,
+      GetErasureCodingPoliciesRequestProto request) throws ServiceException {
     try {
-      ECSchema[] ecSchemas = server.getECSchemas();
-      GetECSchemasResponseProto.Builder resBuilder = GetECSchemasResponseProto
+      ErasureCodingPolicy[] ecPolicies = server.getErasureCodingPolicies();
+      GetErasureCodingPoliciesResponseProto.Builder resBuilder = GetErasureCodingPoliciesResponseProto
           .newBuilder();
-      for (ECSchema ecSchema : ecSchemas) {
-        resBuilder.addSchemas(PBHelper.convertECSchema(ecSchema));
+      for (ErasureCodingPolicy ecPolicy : ecPolicies) {
+        resBuilder.addEcPolicies(PBHelper.convertErasureCodingPolicy(ecPolicy));
       }
       return resBuilder.build();
     } catch (IOException e) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 342da0c..4f29c4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -166,12 +166,12 @@
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto;
@@ -183,7 +183,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.ProtocolTranslator;
@@ -241,8 +241,8 @@
   VOID_GET_STORAGE_POLICIES_REQUEST =
       GetStoragePoliciesRequestProto.newBuilder().build();
 
-  private final static GetECSchemasRequestProto
-  VOID_GET_ECSCHEMAS_REQUEST = GetECSchemasRequestProto
+  private final static GetErasureCodingPoliciesRequestProto
+  VOID_GET_EC_POLICIES_REQUEST = GetErasureCodingPoliciesRequestProto
       .newBuilder().build();
 
   public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
@@ -1420,16 +1420,13 @@
   }
 
   @Override
-  public void createErasureCodingZone(String src, ECSchema schema, int cellSize)
+  public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
       throws IOException {
     final CreateErasureCodingZoneRequestProto.Builder builder =
         CreateErasureCodingZoneRequestProto.newBuilder();
     builder.setSrc(src);
-    if (schema != null) {
-      builder.setSchema(PBHelper.convertECSchema(schema));
-    }
-    if (cellSize > 0) {
-      builder.setCellSize(cellSize);
+    if (ecPolicy != null) {
+      builder.setEcPolicy(PBHelper.convertErasureCodingPolicy(ecPolicy));
     }
     CreateErasureCodingZoneRequestProto req = builder.build();
     try {
@@ -1563,16 +1560,17 @@
   }
 
   @Override
-  public ECSchema[] getECSchemas() throws IOException {
+  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
     try {
-      GetECSchemasResponseProto response = rpcProxy.getECSchemas(null,
-          VOID_GET_ECSCHEMAS_REQUEST);
-      ECSchema[] schemas = new ECSchema[response.getSchemasCount()];
+      GetErasureCodingPoliciesResponseProto response = rpcProxy
+          .getErasureCodingPolicies(null, VOID_GET_EC_POLICIES_REQUEST);
+      ErasureCodingPolicy[] ecPolicies =
+          new ErasureCodingPolicy[response.getEcPoliciesCount()];
       int i = 0;
-      for (ECSchemaProto schemaProto : response.getSchemasList()) {
-        schemas[i++] = PBHelper.convertECSchema(schemaProto);
+      for (ErasureCodingPolicyProto ecPolicyProto : response.getEcPoliciesList()) {
+        ecPolicies[i++] = PBHelper.convertErasureCodingPolicy(ecPolicyProto);
       }
-      return schemas;
+      return ecPolicies;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
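
For context, the renamed RPC can be exercised end to end once a ClientNamenodeProtocolPB proxy is in hand. A minimal sketch follows; the class name and the elided proxy wiring (normally done via NameNodeProxies) are illustrative assumptions, only the translator call itself comes from this patch:

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;

public class ListErasureCodingPolicies {
  // Prints every policy the NameNode reports. The translator is assumed to be
  // already wired to a ClientNamenodeProtocolPB proxy by the caller.
  static void printPolicies(ClientNamenodeProtocolTranslatorPB translator)
      throws IOException {
    for (ErasureCodingPolicy policy : translator.getErasureCodingPolicies()) {
      System.out.println(policy.getName() + ": " + policy.getNumDataUnits()
          + "+" + policy.getNumParityUnits() + " units, cellSize="
          + policy.getCellSize());
    }
  }
}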
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index a97e2ff..c083b5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -78,6 +78,7 @@
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -137,6 +138,7 @@
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ErasureCodingZoneProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
@@ -1348,8 +1350,7 @@
             PBHelper.convertLocatedBlockProto(lb.getLastBlock()) : null,
         lb.getIsLastBlockComplete(),
         lb.hasFileEncryptionInfo() ? convert(lb.getFileEncryptionInfo()) : null,
-        lb.hasECSchema() ? convertECSchema(lb.getECSchema()) : null,
-        lb.hasStripeCellSize() ? lb.getStripeCellSize() : 0);
+        lb.hasEcPolicy() ? convertErasureCodingPolicy(lb.getEcPolicy()) : null);
   }
   
   public static LocatedBlocksProto convert(LocatedBlocks lb) {
@@ -1365,11 +1366,8 @@
     if (lb.getFileEncryptionInfo() != null) {
       builder.setFileEncryptionInfo(convert(lb.getFileEncryptionInfo()));
     }
-    if (lb.getECSchema() != null) {
-      builder.setECSchema(convertECSchema(lb.getECSchema()));
-    }
-    if (lb.getStripeCellSize() != 0) {
-      builder.setStripeCellSize(lb.getStripeCellSize());
+    if (lb.getErasureCodingPolicy() != null) {
+      builder.setEcPolicy(convertErasureCodingPolicy(lb.getErasureCodingPolicy()));
     }
     return builder.setFileLength(lb.getFileLength())
         .setUnderConstruction(lb.isUnderConstruction())
@@ -1514,8 +1512,7 @@
         fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
         fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
             : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
-        fs.hasEcSchema() ? PBHelper.convertECSchema(fs.getEcSchema()) : null,
-        fs.hasStripeCellSize() ? fs.getStripeCellSize() : 0);
+        fs.hasEcPolicy() ? PBHelper.convertErasureCodingPolicy(fs.getEcPolicy()) : null);
   }
 
   public static SnapshottableDirectoryStatus convert(
@@ -1576,10 +1573,9 @@
         builder.setLocations(PBHelper.convert(locations));
       }
     }
-    if(fs.getECSchema() != null) {
-      builder.setEcSchema(PBHelper.convertECSchema(fs.getECSchema()));
+    if(fs.getErasureCodingPolicy() != null) {
+      builder.setEcPolicy(PBHelper.convertErasureCodingPolicy(fs.getErasureCodingPolicy()));
     }
-    builder.setStripeCellSize(fs.getStripeCellSize());
     return builder.build();
   }
   
@@ -3155,13 +3151,12 @@
     for (ECSchemaOptionEntryProto option : optionsList) {
       options.put(option.getKey(), option.getValue());
     }
-    return new ECSchema(schema.getSchemaName(), schema.getCodecName(),
-        schema.getDataUnits(), schema.getParityUnits(), options);
+    return new ECSchema(schema.getCodecName(), schema.getDataUnits(),
+        schema.getParityUnits(), options);
   }
 
   public static ECSchemaProto convertECSchema(ECSchema schema) {
     ECSchemaProto.Builder builder = ECSchemaProto.newBuilder()
-        .setSchemaName(schema.getSchemaName())
         .setCodecName(schema.getCodecName())
         .setDataUnits(schema.getNumDataUnits())
         .setParityUnits(schema.getNumParityUnits());
@@ -3173,17 +3168,34 @@
     return builder.build();
   }
 
+  public static ErasureCodingPolicy convertErasureCodingPolicy(
+      ErasureCodingPolicyProto policy) {
+    return new ErasureCodingPolicy(policy.getName(),
+        convertECSchema(policy.getSchema()),
+        policy.getCellSize());
+  }
+
+  public static ErasureCodingPolicyProto convertErasureCodingPolicy(
+      ErasureCodingPolicy policy) {
+    ErasureCodingPolicyProto.Builder builder = ErasureCodingPolicyProto
+        .newBuilder()
+        .setName(policy.getName())
+        .setSchema(convertECSchema(policy.getSchema()))
+        .setCellSize(policy.getCellSize());
+    return builder.build();
+  }
+
   public static ErasureCodingZoneProto convertErasureCodingZone(
       ErasureCodingZone ecZone) {
     return ErasureCodingZoneProto.newBuilder().setDir(ecZone.getDir())
-        .setSchema(convertECSchema(ecZone.getSchema()))
-        .setCellSize(ecZone.getCellSize()).build();
+        .setEcPolicy(convertErasureCodingPolicy(ecZone.getErasureCodingPolicy()))
+        .build();
   }
 
   public static ErasureCodingZone convertErasureCodingZone(
       ErasureCodingZoneProto ecZoneProto) {
     return new ErasureCodingZone(ecZoneProto.getDir(),
-        convertECSchema(ecZoneProto.getSchema()), ecZoneProto.getCellSize());
+        convertErasureCodingPolicy(ecZoneProto.getEcPolicy()));
   }
   
   public static BlockECRecoveryInfo convertBlockECRecoveryInfo(
@@ -3216,12 +3228,11 @@
       liveBlkIndices[i] = liveBlockIndicesList.get(i).shortValue();
     }
 
-    ECSchema ecSchema = convertECSchema(blockEcRecoveryInfoProto.getEcSchema());
-    int cellSize = blockEcRecoveryInfoProto.getCellSize();
+    ErasureCodingPolicy ecPolicy =
+        convertErasureCodingPolicy(blockEcRecoveryInfoProto.getEcPolicy());
 
     return new BlockECRecoveryInfo(block, sourceDnInfos, targetDnInfos,
-        targetStorageUuids, convertStorageTypes, liveBlkIndices, ecSchema,
-        cellSize);
+        targetStorageUuids, convertStorageTypes, liveBlkIndices, ecPolicy);
   }
 
   public static BlockECRecoveryInfoProto convertBlockECRecoveryInfo(
@@ -3246,8 +3257,8 @@
     short[] liveBlockIndices = blockEcRecoveryInfo.getLiveBlockIndices();
     builder.addAllLiveBlockIndices(convertIntArray(liveBlockIndices));
 
-    builder.setEcSchema(convertECSchema(blockEcRecoveryInfo.getECSchema()));
-    builder.setCellSize(blockEcRecoveryInfo.getCellSize());
+    builder.setEcPolicy(convertErasureCodingPolicy(blockEcRecoveryInfo
+        .getErasureCodingPolicy()));
 
     return builder.build();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index 6674510..14d2fcc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -20,7 +20,7 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
 
@@ -38,8 +38,7 @@
  * array to record the block index for each triplet.
  */
 public class BlockInfoStriped extends BlockInfo {
-  private final ECSchema schema;
-  private final int cellSize;
+  private final ErasureCodingPolicy ecPolicy;
   /**
    * Always the same size with triplets. Record the block index for each triplet
    * TODO: actually this is only necessary for over-replicated block. Thus can
@@ -47,36 +46,34 @@
    */
   private byte[] indices;
 
-  public BlockInfoStriped(Block blk, ECSchema schema, int cellSize) {
-    super(blk, (short) (schema.getNumDataUnits() + schema.getNumParityUnits()));
-    indices = new byte[schema.getNumDataUnits() + schema.getNumParityUnits()];
+  public BlockInfoStriped(Block blk, ErasureCodingPolicy ecPolicy) {
+    super(blk, (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits()));
+    indices = new byte[ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits()];
     initIndices();
-    this.schema = schema;
-    this.cellSize = cellSize;
+    this.ecPolicy = ecPolicy;
   }
 
   BlockInfoStriped(BlockInfoStriped b) {
-    this(b, b.getSchema(), b.getCellSize());
+    this(b, b.getErasureCodingPolicy());
     this.setBlockCollection(b.getBlockCollection());
   }
 
   public short getTotalBlockNum() {
-    return (short) (this.schema.getNumDataUnits()
-        + this.schema.getNumParityUnits());
+    return (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits());
   }
 
   public short getDataBlockNum() {
-    return (short) this.schema.getNumDataUnits();
+    return (short) ecPolicy.getNumDataUnits();
   }
 
   public short getParityBlockNum() {
-    return (short) this.schema.getNumParityUnits();
+    return (short) ecPolicy.getNumParityUnits();
   }
 
   /**
    * If the block is committed/completed and its length is less than a full
    * stripe, it returns the number of actual data blocks.
-   * Otherwise it returns the number of data units specified by schema.
+   * Otherwise it returns the number of data units in the erasure coding policy.
    */
   public short getRealDataBlockNum() {
     if (isComplete() || getBlockUCState() == BlockUCState.COMMITTED) {
@@ -91,12 +88,8 @@
     return (short) (getRealDataBlockNum() + getParityBlockNum());
   }
 
-  public ECSchema getSchema() {
-    return schema;
-  }
-
-  public int getCellSize() {
-    return cellSize;
+  public ErasureCodingPolicy getErasureCodingPolicy() {
+    return ecPolicy;
   }
 
   private void initIndices() {
@@ -230,7 +223,7 @@
     // be the total of data blocks and parity blocks because
     // `getNumBytes` is the total of actual data block size.
     return StripedBlockUtil.spaceConsumedByStripedBlock(getNumBytes(),
-        this.schema.getNumDataUnits(), this.schema.getNumParityUnits(),
+        ecPolicy.getNumDataUnits(), ecPolicy.getNumParityUnits(),
         BLOCK_STRIPED_CELL_SIZE);
     }
 
@@ -260,7 +253,7 @@
       BlockUCState s, DatanodeStorageInfo[] targets) {
     final BlockInfoStripedUnderConstruction ucBlock;
     if(isComplete()) {
-      ucBlock = new BlockInfoStripedUnderConstruction(this, schema, cellSize,
+      ucBlock = new BlockInfoStripedUnderConstruction(this, ecPolicy,
           s, targets);
       ucBlock.setBlockCollection(getBlockCollection());
     } else {
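
Since the policy now carries the unit counts, the block-count arithmetic above reduces to simple getter sums. A small sketch under the shipped RS-6-3-64k default (the class name is illustrative):

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

public class StripeCounts {
  public static void main(String[] args) {
    ErasureCodingPolicy p = ErasureCodingPolicyManager.getSystemDefaultPolicy();
    // Same arithmetic as BlockInfoStriped.getTotalBlockNum():
    short total = (short) (p.getNumDataUnits() + p.getNumParityUnits());
    System.out.println(p.getNumDataUnits());   // 6 data blocks
    System.out.println(p.getNumParityUnits()); // 3 parity blocks
    System.out.println(total);                 // 9 storage locations per group
  }
}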
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
index 5f78096..9de8294 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
@@ -21,7 +21,7 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 import java.io.IOException;
 
@@ -57,17 +57,16 @@
   /**
    * Constructor with null storage targets.
    */
-  public BlockInfoStripedUnderConstruction(Block blk, ECSchema schema,
-      int cellSize) {
-    this(blk, schema, cellSize, UNDER_CONSTRUCTION, null);
+  public BlockInfoStripedUnderConstruction(Block blk, ErasureCodingPolicy ecPolicy) {
+    this(blk, ecPolicy, UNDER_CONSTRUCTION, null);
   }
 
   /**
    * Create a striped block that is currently being constructed.
    */
-  public BlockInfoStripedUnderConstruction(Block blk, ECSchema schema,
-      int cellSize, BlockUCState state, DatanodeStorageInfo[] targets) {
-    super(blk, schema, cellSize);
+  public BlockInfoStripedUnderConstruction(Block blk, ErasureCodingPolicy ecPolicy,
+      BlockUCState state, DatanodeStorageInfo[] targets) {
+    super(blk, ecPolicy);
     assert getBlockUCState() != COMPLETE :
       "BlockInfoStripedUnderConstruction cannot be in COMPLETE state";
     this.blockUCState = state;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index eed05c5..12446c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -95,6 +95,11 @@
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+
+import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
+import static org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength;
+
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -964,14 +970,13 @@
       ErasureCodingZone ecZone)
       throws IOException {
     assert namesystem.hasReadLock();
-    final ECSchema schema = ecZone != null ? ecZone.getSchema() : null;
-    final int cellSize = ecZone != null ? ecZone.getCellSize() : 0;
+    final ErasureCodingPolicy ecPolicy = ecZone != null ? ecZone
+        .getErasureCodingPolicy() : null;
     if (blocks == null) {
       return null;
     } else if (blocks.length == 0) {
       return new LocatedBlocks(0, isFileUnderConstruction,
-          Collections.<LocatedBlock> emptyList(), null, false, feInfo, schema,
-          cellSize);
+          Collections.<LocatedBlock> emptyList(), null, false, feInfo, ecPolicy);
     } else {
       if (LOG.isDebugEnabled()) {
         LOG.debug("blocks = " + java.util.Arrays.asList(blocks));
@@ -996,7 +1001,7 @@
       }
       return new LocatedBlocks(fileSizeExcludeBlocksUnderConstruction,
           isFileUnderConstruction, locatedblocks, lastlb, isComplete, feInfo,
-          schema, cellSize);
+          ecPolicy);
     }
   }
 
@@ -1613,7 +1618,7 @@
                   .warn("Failed to get the EC zone for the file {} ", src);
             }
             if (ecZone == null) {
-              blockLog.warn("No EC schema found for the file {}. "
+              blockLog.warn("No erasure coding policy found for the file {}. "
                   + "So cannot proceed for recovery", src);
               // TODO: we may have to revisit later for what we can do better to
               // handle this case.
@@ -1623,7 +1628,7 @@
                 new ExtendedBlock(namesystem.getBlockPoolId(), block),
                 rw.srcNodes, rw.targets,
                 ((ErasureCodingWork) rw).liveBlockIndicies,
-                ecZone.getSchema(), ecZone.getCellSize());
+                ecZone.getErasureCodingPolicy());
           } else {
             rw.srcNodes[0].addBlockToBeReplicated(block, targets);
           }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 3cf9db6..21f9f39 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -50,7 +50,7 @@
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.hdfs.util.EnumCounters;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.util.IntrusiveCollection;
 import org.apache.hadoop.util.Time;
 
@@ -611,10 +611,10 @@
    */
   void addBlockToBeErasureCoded(ExtendedBlock block,
       DatanodeDescriptor[] sources, DatanodeStorageInfo[] targets,
-      short[] liveBlockIndices, ECSchema ecSchema, int cellSize) {
+      short[] liveBlockIndices, ErasureCodingPolicy ecPolicy) {
     assert (block != null && sources != null && sources.length > 0);
     BlockECRecoveryInfo task = new BlockECRecoveryInfo(block, sources, targets,
-        liveBlockIndices, ecSchema, cellSize);
+        liveBlockIndices, ecPolicy);
     erasurecodeBlocks.offer(task);
     BlockManager.LOG.debug("Adding block recovery task " + task + "to "
         + getName() + ", current queue size is " + erasurecodeBlocks.size());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
index 5c8dd85..7873459 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
@@ -54,7 +54,7 @@
       // drop any (illegal) authority in the URI for backwards compatibility
       this.file = new File(uri.getPath());
     } else {
-      throw new IllegalArgumentException("Unsupported URI schema in " + uri);
+      throw new IllegalArgumentException("Unsupported URI ecPolicy in " + uri);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
index 3c9adc4..f6a5ece 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
@@ -69,8 +69,7 @@
 import org.apache.hadoop.hdfs.util.StripedBlockUtil.StripingChunkReadResult;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
-import org.apache.hadoop.io.erasurecode.ECSchema;
-import org.apache.hadoop.io.erasurecode.rawcoder.RSRawDecoder;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;
@@ -267,10 +266,10 @@
         new ExecutorCompletionService<>(STRIPED_READ_THREAD_POOL);
 
     ReconstructAndTransferBlock(BlockECRecoveryInfo recoveryInfo) {
-      ECSchema schema = recoveryInfo.getECSchema();
-      dataBlkNum = schema.getNumDataUnits();
-      parityBlkNum = schema.getNumParityUnits();
-      cellSize = recoveryInfo.getCellSize();
+      ErasureCodingPolicy ecPolicy = recoveryInfo.getErasureCodingPolicy();
+      dataBlkNum = ecPolicy.getNumDataUnits();
+      parityBlkNum = ecPolicy.getNumParityUnits();
+      cellSize = ecPolicy.getCellSize();
 
       blockGroup = recoveryInfo.getExtendedBlock();
       final int cellsNum = (int)((blockGroup.getNumBytes() - 1) / cellSize + 1);
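
The cellsNum expression above is an integer ceiling division over the block group length. A tiny standalone check with an arbitrary byte count (the class name is illustrative):

public class CellsNumCheck {
  public static void main(String[] args) {
    int cellSize = 64 * 1024;
    long numBytes = 300 * 1024;  // 4 full cells plus a partial fifth
    // Same expression ReconstructAndTransferBlock uses:
    int cellsNum = (int) ((numBytes - 1) / cellSize + 1);
    System.out.println(cellsNum);  // 5
  }
}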
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index ea6ba54..34d92d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -45,7 +45,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
@@ -176,7 +176,7 @@
   }
 
   DBlock newDBlock(LocatedBlock lb, List<MLocation> locations,
-                   ECSchema ecSchema) {
+                   ErasureCodingPolicy ecPolicy) {
     Block blk = lb.getBlock().getLocalBlock();
     DBlock db;
     if (lb.isStriped()) {
@@ -185,7 +185,7 @@
       for (int i = 0; i < indices.length; i++) {
         indices[i] = (byte) lsb.getBlockIndices()[i];
       }
-      db = new DBlockStriped(blk, indices, (short) ecSchema.getNumDataUnits());
+      db = new DBlockStriped(blk, indices, (short) ecPolicy.getNumDataUnits());
     } else {
       db = new DBlock(blk);
     }
@@ -374,7 +374,7 @@
       List<StorageType> types = policy.chooseStorageTypes(
           status.getReplication());
 
-      final ECSchema ecSchema = status.getECSchema();
+      final ErasureCodingPolicy ecPolicy = status.getErasureCodingPolicy();
       final LocatedBlocks locatedBlocks = status.getBlockLocations();
       final boolean lastBlkComplete = locatedBlocks.isLastBlockComplete();
       List<LocatedBlock> lbs = locatedBlocks.getLocatedBlocks();
@@ -390,7 +390,7 @@
         final StorageTypeDiff diff = new StorageTypeDiff(types,
             lb.getStorageTypes());
         if (!diff.removeOverlap(true)) {
-          if (scheduleMoves4Block(diff, lb, ecSchema)) {
+          if (scheduleMoves4Block(diff, lb, ecPolicy)) {
             result.updateHasRemaining(diff.existing.size() > 1
                 && diff.expected.size() > 1);
             // One block scheduled successfully, set noBlockMoved to false
@@ -403,12 +403,12 @@
     }
 
     boolean scheduleMoves4Block(StorageTypeDiff diff, LocatedBlock lb,
-                                ECSchema ecSchema) {
+                                ErasureCodingPolicy ecPolicy) {
       final List<MLocation> locations = MLocation.toLocations(lb);
       if (!(lb instanceof LocatedStripedBlock)) {
         Collections.shuffle(locations);
       }
-      final DBlock db = newDBlock(lb, locations, ecSchema);
+      final DBlock db = newDBlock(lb, locations, ecPolicy);
 
       for (final StorageType t : diff.existing) {
         for (final MLocation ml : locations) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
new file mode 100644
index 0000000..71ac36a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.io.erasurecode.ECSchema;
+
+import java.util.Map;
+import java.util.TreeMap;
+
+/**
+ * This manages erasure coding policies predefined and activated in the system.
+ * It loads customized policies and syncs with persisted ones in
+ * NameNode image.
+ *
+ * This class is instantiated by the FSNamesystem.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+public final class ErasureCodingPolicyManager {
+
+  /**
+   * TODO: HDFS-8095
+   */
+  private static final int DEFAULT_DATA_BLOCKS = 6;
+  private static final int DEFAULT_PARITY_BLOCKS = 3;
+  private static final int DEFAULT_CELLSIZE = 64 * 1024;
+  private static final String DEFAULT_CODEC_NAME = "rs";
+  private static final String DEFAULT_POLICY_NAME = "RS-6-3-64k";
+  private static final ECSchema SYS_DEFAULT_SCHEMA = new ECSchema(
+      DEFAULT_CODEC_NAME, DEFAULT_DATA_BLOCKS, DEFAULT_PARITY_BLOCKS);
+  private static final ErasureCodingPolicy SYS_DEFAULT_POLICY =
+      new ErasureCodingPolicy(DEFAULT_POLICY_NAME, SYS_DEFAULT_SCHEMA,
+      DEFAULT_CELLSIZE);
+
+  // We may add more later.
+  private static ErasureCodingPolicy[] SYS_POLICY = new ErasureCodingPolicy[] {
+      SYS_DEFAULT_POLICY
+  };
+
+  /**
+   * All active policies maintained in NN memory for fast querying,
+   * identified and sorted by name.
+   */
+  private final Map<String, ErasureCodingPolicy> activePolicies;
+
+  ErasureCodingPolicyManager() {
+
+    this.activePolicies = new TreeMap<>();
+    for (ErasureCodingPolicy policy : SYS_POLICY) {
+      activePolicies.put(policy.getName(), policy);
+    }
+
+    /**
+     * TODO: HDFS-7859 persist into NameNode
+     * load persistent policies from image and editlog, which is done only once
+     * during NameNode startup. This can be done here or in a separate method.
+     */
+  }
+
+  /**
+   * Get the system-defined policies.
+   * @return system policies
+   */
+  public static ErasureCodingPolicy[] getSystemPolicies() {
+    return SYS_POLICY;
+  }
+
+  /**
+   * Get the system-wide default policy, which is used by default when no
+   * policy is specified for an EC zone.
+   * @return the system default erasure coding policy
+   */
+  public static ErasureCodingPolicy getSystemDefaultPolicy() {
+    return SYS_DEFAULT_POLICY;
+  }
+
+  /**
+   * Get all policies that are available for use.
+   * @return all policies
+   */
+  public ErasureCodingPolicy[] getPolicies() {
+    ErasureCodingPolicy[] results = new ErasureCodingPolicy[activePolicies.size()];
+    return activePolicies.values().toArray(results);
+  }
+
+  /**
+   * Get the policy specified by the policy name.
+   */
+  public ErasureCodingPolicy getPolicy(String name) {
+    return activePolicies.get(name);
+  }
+
+  /**
+   * Clear and clean up
+   */
+  public void clear() {
+    activePolicies.clear();
+  }
+}
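
The constructor is package-private, so outside org.apache.hadoop.hdfs.server.namenode only the static accessors are reachable. A minimal probe of the shipped default (the class name is illustrative):

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

public class DefaultPolicyProbe {
  public static void main(String[] args) {
    ErasureCodingPolicy def = ErasureCodingPolicyManager.getSystemDefaultPolicy();
    System.out.println(def.getName());                  // RS-6-3-64k
    System.out.println(def.getSchema().getCodecName()); // rs
    System.out.println(def.getCellSize());              // 65536
    // Only the single system policy is registered so far:
    System.out.println(ErasureCodingPolicyManager.getSystemPolicies().length); // 1
  }
}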
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingSchemaManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingSchemaManager.java
deleted file mode 100644
index 4c4aae9..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingSchemaManager.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.io.erasurecode.ECSchema;
-
-import java.util.Map;
-import java.util.TreeMap;
-
-/**
- * This manages EC schemas predefined and activated in the system.
- * It loads customized schemas and syncs with persisted ones in
- * NameNode image.
- *
- * This class is instantiated by the FSNamesystem.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS"})
-public final class ErasureCodingSchemaManager {
-
-  /**
-   * TODO: HDFS-8095
-   */
-  private static final int DEFAULT_DATA_BLOCKS = 6;
-  private static final int DEFAULT_PARITY_BLOCKS = 3;
-  private static final String DEFAULT_CODEC_NAME = "rs";
-  private static final String DEFAULT_SCHEMA_NAME = "RS-6-3";
-  private static final ECSchema SYS_DEFAULT_SCHEMA =
-      new ECSchema(DEFAULT_SCHEMA_NAME,
-               DEFAULT_CODEC_NAME, DEFAULT_DATA_BLOCKS, DEFAULT_PARITY_BLOCKS);
-
-  //We may add more later.
-  private static ECSchema[] SYS_SCHEMAS = new ECSchema[] {
-      SYS_DEFAULT_SCHEMA
-  };
-
-  /**
-   * All active EC activeSchemas maintained in NN memory for fast querying,
-   * identified and sorted by its name.
-   */
-  private final Map<String, ECSchema> activeSchemas;
-
-  ErasureCodingSchemaManager() {
-
-    this.activeSchemas = new TreeMap<String, ECSchema>();
-    for (ECSchema schema : SYS_SCHEMAS) {
-      activeSchemas.put(schema.getSchemaName(), schema);
-    }
-
-    /**
-     * TODO: HDFS-7859 persist into NameNode
-     * load persistent schemas from image and editlog, which is done only once
-     * during NameNode startup. This can be done here or in a separate method.
-     */
-  }
-
-  /**
-   * Get system defined schemas.
-   * @return system schemas
-   */
-  public static ECSchema[] getSystemSchemas() {
-    return SYS_SCHEMAS;
-  }
-
-  /**
-   * Get system-wide default EC schema, which can be used by default when no
-   * schema is specified for an EC zone.
-   * @return schema
-   */
-  public static ECSchema getSystemDefaultSchema() {
-    return SYS_DEFAULT_SCHEMA;
-  }
-
-  /**
-   * Tell the specified schema is the system default one or not.
-   * @param schema
-   * @return true if it's the default false otherwise
-   */
-  public static boolean isSystemDefault(ECSchema schema) {
-    if (schema == null) {
-      throw new IllegalArgumentException("Invalid schema parameter");
-    }
-
-    // schema name is the identifier.
-    return SYS_DEFAULT_SCHEMA.getSchemaName().equals(schema.getSchemaName());
-  }
-
-  /**
-   * Get all EC schemas that's available to use.
-   * @return all EC schemas
-   */
-  public ECSchema[] getSchemas() {
-    ECSchema[] results = new ECSchema[activeSchemas.size()];
-    return activeSchemas.values().toArray(results);
-  }
-
-  /**
-   * Get the EC schema specified by the schema name.
-   * @param schemaName
-   * @return EC schema specified by the schema name
-   */
-  public ECSchema getSchema(String schemaName) {
-    return activeSchemas.get(schemaName);
-  }
-
-  /**
-   * Clear and clean up
-   */
-  public void clear() {
-    activeSchemas.clear();
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
index 2638126..22d821f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
@@ -23,11 +23,10 @@
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.XAttrHelper;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -60,9 +59,9 @@
     this.dir = dir;
   }
 
-  ECSchema getErasureCodingSchema(INodesInPath iip) throws IOException {
+  ErasureCodingPolicy getErasureCodingPolicy(INodesInPath iip) throws IOException {
     ErasureCodingZone ecZone = getErasureCodingZone(iip);
-    return ecZone == null ? null : ecZone.getSchema();
+    return ecZone == null ? null : ecZone.getErasureCodingPolicy();
   }
 
   ErasureCodingZone getErasureCodingZone(INodesInPath iip) throws IOException {
@@ -88,12 +87,11 @@
         if (XATTR_ERASURECODING_ZONE.equals(XAttrHelper.getPrefixName(xAttr))) {
           ByteArrayInputStream bIn=new ByteArrayInputStream(xAttr.getValue());
           DataInputStream dIn=new DataInputStream(bIn);
-          int cellSize = WritableUtils.readVInt(dIn);
-          String schemaName = WritableUtils.readString(dIn);
-          ECSchema schema = dir.getFSNamesystem()
-              .getErasureCodingSchemaManager().getSchema(schemaName);
+          String ecPolicyName = WritableUtils.readString(dIn);
+          ErasureCodingPolicy ecPolicy = dir.getFSNamesystem()
+              .getErasureCodingPolicyManager().getPolicy(ecPolicyName);
           return new ErasureCodingZone(dir.getInode(inode.getId())
-              .getFullPathName(), schema, cellSize);
+              .getFullPathName(), ecPolicy);
         }
       }
     }
@@ -101,7 +99,7 @@
   }
 
   List<XAttr> createErasureCodingZone(final INodesInPath srcIIP,
-      ECSchema schema, int cellSize) throws IOException {
+      ErasureCodingPolicy ecPolicy) throws IOException {
     assert dir.hasWriteLock();
     Preconditions.checkNotNull(srcIIP, "INodes cannot be null");
     String src = srcIIP.getPath();
@@ -115,29 +113,22 @@
       throw new IOException("Attempt to create an erasure coding zone " +
           "for a file " + src);
     }
-    if (getErasureCodingSchema(srcIIP) != null) {
+    if (getErasureCodingPolicy(srcIIP) != null) {
       throw new IOException("Directory " + src + " is already in an " +
           "erasure coding zone.");
     }
 
-    // System default schema will be used since no specified.
-    if (schema == null) {
-      schema = ErasureCodingSchemaManager.getSystemDefaultSchema();
+    // The system default erasure coding policy is used when none is specified.
+    if (ecPolicy == null) {
+      ecPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
     }
 
-    if (cellSize <= 0) {
-      cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
-    }
-
-    // Write the cellsize first and then schema name
     final XAttr ecXAttr;
     DataOutputStream dOut = null;
     try {
       ByteArrayOutputStream bOut = new ByteArrayOutputStream();
       dOut = new DataOutputStream(bOut);
-      WritableUtils.writeVInt(dOut, cellSize);
-      // Now persist the schema name in xattr
-      WritableUtils.writeString(dOut, schema.getSchemaName());
+      WritableUtils.writeString(dOut, ecPolicy.getName());
       ecXAttr = XAttrHelper.buildXAttr(XATTR_ERASURECODING_ZONE,
           bOut.toByteArray());
     } finally {
@@ -158,10 +149,12 @@
     if (srcZone != null && srcZone.getDir().equals(src) && dstZone == null) {
       return;
     }
-    final ECSchema srcSchema = (srcZone != null) ? srcZone.getSchema() : null;
-    final ECSchema dstSchema = (dstZone != null) ? dstZone.getSchema() : null;
-    if ((srcSchema != null && !srcSchema.equals(dstSchema)) ||
-        (dstSchema != null && !dstSchema.equals(srcSchema))) {
+    final ErasureCodingPolicy srcECPolicy =
+        srcZone != null ? srcZone.getErasureCodingPolicy() : null;
+    final ErasureCodingPolicy dstECPolicy =
+        dstZone != null ? dstZone.getErasureCodingPolicy() : null;
+    if (srcECPolicy != null && !srcECPolicy.equals(dstECPolicy) ||
+        dstECPolicy != null && !dstECPolicy.equals(srcECPolicy)) {
       throw new IOException(
           src + " can't be moved because the source and destination have " +
               "different erasure coding policies.");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index fd7ef33..8c515d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -22,9 +22,9 @@
 
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 
 /**
  * Helper class to perform erasure coding related operations.
@@ -43,15 +43,14 @@
    * @param fsn namespace
    * @param srcArg the path of a directory which will be the root of the
    *          erasure coding zone. The directory must be empty.
-   * @param schema ECSchema for the erasure coding zone
-   * @param cellSize Cell size of stripe
+   * @param ecPolicy erasure coding policy for the erasure coding zone
    * @param logRetryCache whether to record RPC ids in editlog for retry
    *          cache rebuilding
    * @return {@link HdfsFileStatus}
    * @throws IOException
    */
   static HdfsFileStatus createErasureCodingZone(final FSNamesystem fsn,
-      final String srcArg, final ECSchema schema, final int cellSize,
+      final String srcArg, final ErasureCodingPolicy ecPolicy,
       final boolean logRetryCache) throws IOException {
     assert fsn.hasWriteLock();
 
@@ -68,7 +67,7 @@
     try {
       iip = fsd.getINodesInPath4Write(src, false);
       xAttrs = fsn.getErasureCodingZoneManager().createErasureCodingZone(
-          iip, schema, cellSize);
+          iip, ecPolicy);
     } finally {
       fsd.writeUnlock();
     }
@@ -120,7 +119,7 @@
     assert fsn.hasReadLock();
 
     final INodesInPath iip = getINodesInPath(fsn, srcArg);
-    return getErasureCodingSchemaForPath(fsn, iip) != null;
+    return getErasureCodingPolicyForPath(fsn, iip) != null;
   }
 
   /**
@@ -133,49 +132,35 @@
    */
   static boolean isInErasureCodingZone(final FSNamesystem fsn,
       final INodesInPath iip) throws IOException {
-    return getErasureCodingSchema(fsn, iip) != null;
+    return getErasureCodingPolicy(fsn, iip) != null;
   }
 
   /**
-   * Get erasure coding schema.
+   * Get the erasure coding policy.
    *
    * @param fsn namespace
    * @param iip inodes in the path containing the file
-   * @return {@link ECSchema}
+   * @return {@link ErasureCodingPolicy}
    * @throws IOException
    */
-  static ECSchema getErasureCodingSchema(final FSNamesystem fsn,
+  static ErasureCodingPolicy getErasureCodingPolicy(final FSNamesystem fsn,
       final INodesInPath iip) throws IOException {
     assert fsn.hasReadLock();
 
-    return getErasureCodingSchemaForPath(fsn, iip);
+    return getErasureCodingPolicyForPath(fsn, iip);
   }
 
   /**
-   * Get available erasure coding schemas.
+   * Get the available erasure coding policies.
    *
    * @param fsn namespace
-   * @return {@link ECSchema} array
+   * @return {@link ErasureCodingPolicy} array
    */
-  static ECSchema[] getErasureCodingSchemas(final FSNamesystem fsn)
+  static ErasureCodingPolicy[] getErasureCodingPolicies(final FSNamesystem fsn)
       throws IOException {
     assert fsn.hasReadLock();
 
-    return fsn.getErasureCodingSchemaManager().getSchemas();
-  }
-
-  /**
-   * Get the ECSchema specified by the name.
-   *
-   * @param fsn namespace
-   * @param schemaName schema name
-   * @return {@link ECSchema}
-   */
-  static ECSchema getErasureCodingSchema(final FSNamesystem fsn,
-      final String schemaName) throws IOException {
-    assert fsn.hasReadLock();
-
-    return fsn.getErasureCodingSchemaManager().getSchema(schemaName);
+    return fsn.getErasureCodingPolicyManager().getPolicies();
   }
 
   private static INodesInPath getINodesInPath(final FSNamesystem fsn,
@@ -204,12 +189,12 @@
     }
   }
 
-  private static ECSchema getErasureCodingSchemaForPath(final FSNamesystem fsn,
+  private static ErasureCodingPolicy getErasureCodingPolicyForPath(final FSNamesystem fsn,
       final INodesInPath iip) throws IOException {
     final FSDirectory fsd = fsn.getFSDirectory();
     fsd.readLock();
     try {
-      return fsn.getErasureCodingZoneManager().getErasureCodingSchema(iip);
+      return fsn.getErasureCodingZoneManager().getErasureCodingPolicy(iip);
     } finally {
       fsd.readUnlock();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index c4cfd34..6ec97c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -42,7 +43,6 @@
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -379,7 +379,7 @@
       if (fsd.getINode4DotSnapshot(srcs) != null) {
         return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
             HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
-            HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null, 0);
+            HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
       }
       return null;
     }
@@ -449,8 +449,8 @@
 
     final ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
         fsd.getFSNamesystem(), iip);
-    final ECSchema schema = ecZone != null ? ecZone.getSchema() : null;
-    final int cellSize = ecZone != null ? ecZone.getCellSize() : 0;
+    final ErasureCodingPolicy ecPolicy =
+        ecZone != null ? ecZone.getErasureCodingPolicy() : null;
 
     if (node.isFile()) {
       final INodeFile fileNode = node.asFile();
@@ -482,8 +482,7 @@
         childrenNum,
         feInfo,
         storagePolicy,
-        schema,
-        cellSize);
+        ecPolicy);
   }
 
   private static INodeAttributes getINodeAttributes(
@@ -532,8 +531,8 @@
     }
     int childrenNum = node.isDirectory() ?
         node.asDirectory().getChildrenNum(snapshot) : 0;
-    final ECSchema schema = ecZone != null ? ecZone.getSchema() : null;
-    final int cellSize = ecZone != null ? ecZone.getCellSize() : 0;
+    final ErasureCodingPolicy ecPolicy =
+        ecZone != null ? ecZone.getErasureCodingPolicy() : null;
 
     HdfsLocatedFileStatus status =
         new HdfsLocatedFileStatus(size, node.isDirectory(), replication,
@@ -542,8 +541,7 @@
           getPermissionForFileStatus(nodeAttrs, isEncrypted),
           nodeAttrs.getUserName(), nodeAttrs.getGroupName(),
           node.isSymlink() ? node.asSymlink().getSymlink() : null, path,
-          node.getId(), loc, childrenNum, feInfo, storagePolicy, schema,
-          cellSize);
+          node.getId(), loc, childrenNum, feInfo, storagePolicy, ecPolicy);
     // Set caching information for the located blocks.
     if (loc != null) {
       CacheManager cacheManager = fsd.getFSNamesystem().getCacheManager();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 9121ec6..2cce798 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -35,6 +35,7 @@
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -51,7 +52,6 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.ChunkedArrayList;
@@ -532,16 +532,15 @@
       if (isStriped) {
         ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
             fsd.getFSNamesystem(), inodesInPath);
-        ECSchema ecSchema = ecZone.getSchema();
-        short numDataUnits = (short) ecSchema.getNumDataUnits();
-        short numParityUnits = (short) ecSchema.getNumParityUnits();
+        ErasureCodingPolicy ecPolicy = ecZone.getErasureCodingPolicy();
+        short numDataUnits = (short) ecPolicy.getNumDataUnits();
+        short numParityUnits = (short) ecPolicy.getNumParityUnits();
         short numLocations = (short) (numDataUnits + numParityUnits);
 
         // check quota limits and updated space consumed
         fsd.updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(),
             numLocations, true);
-        blockInfo = new BlockInfoStripedUnderConstruction(block, ecSchema,
-            ecZone.getCellSize(),
+        blockInfo = new BlockInfoStripedUnderConstruction(block, ecPolicy,
             HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
       } else {
         // check quota limits and updated space consumed
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 19569c6..1a40bd3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -994,7 +994,7 @@
     boolean isStriped = ecZone != null;
     if (isStriped) {
       newBlockInfo = new BlockInfoStripedUnderConstruction(newBlock,
-          ecZone.getSchema(), ecZone.getCellSize());
+          ecZone.getErasureCodingPolicy());
     } else {
       newBlockInfo = new BlockInfoContiguousUnderConstruction(newBlock,
           file.getPreferredBlockReplication());
@@ -1080,7 +1080,7 @@
           // until several blocks in?
           if (isStriped) {
             newBI = new BlockInfoStripedUnderConstruction(newBlock,
-                ecZone.getSchema(), ecZone.getCellSize());
+                ecZone.getErasureCodingPolicy());
           } else {
             newBI = new BlockInfoContiguousUnderConstruction(newBlock,
                 file.getPreferredBlockReplication());
@@ -1090,11 +1090,9 @@
           // is only executed when loading edits written by prior
           // versions of Hadoop. Current versions always log
           // OP_ADD operations as each block is allocated.
-          // TODO: ECSchema can be restored from persisted file (HDFS-7859).
           if (isStriped) {
             newBI = new BlockInfoStriped(newBlock,
-                ErasureCodingSchemaManager.getSystemDefaultSchema(),
-                ecZone.getCellSize());
+                ErasureCodingPolicyManager.getSystemDefaultPolicy());
           } else {
             newBI = new BlockInfoContiguous(newBlock,
                 file.getPreferredBlockReplication());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index c4fc1ce..51b04d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
@@ -70,7 +71,6 @@
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 import com.google.protobuf.ByteString;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 
 @InterfaceAudience.Private
 public final class FSImageFormatPBINode {
@@ -336,17 +336,13 @@
       short replication = (short) f.getReplication();
       boolean isStriped = f.getIsStriped();
       LoaderContext state = parent.getLoaderContext();
-      ECSchema schema = ErasureCodingSchemaManager.getSystemDefaultSchema();
+      ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
 
-      if (isStriped) {
-        Preconditions.checkState(f.hasStripingCellSize());
-      }
       BlockInfo[] blocks = new BlockInfo[bp.size()];
       for (int i = 0; i < bp.size(); ++i) {
         BlockProto b = bp.get(i);
         if (isStriped) {
-          blocks[i] = new BlockInfoStriped(PBHelper.convert(b), schema,
-              (int)f.getStripingCellSize());
+          blocks[i] = new BlockInfoStriped(PBHelper.convert(b), ecPolicy);
         } else {
           blocks[i] = new BlockInfoContiguous(PBHelper.convert(b),
               replication);
@@ -382,8 +378,7 @@
           final BlockInfo ucBlk;
           if (isStriped) {
             BlockInfoStriped striped = (BlockInfoStriped) lastBlk;
-            ucBlk = new BlockInfoStripedUnderConstruction(striped,
-                schema, (int)f.getStripingCellSize());
+            ucBlk = new BlockInfoStripedUnderConstruction(striped, ecPolicy);
           } else {
             ucBlk = new BlockInfoContiguousUnderConstruction(lastBlk,
                 replication);
@@ -665,16 +660,6 @@
         }
       }
 
-      if (n.isStriped()) {
-        if (blocks != null && blocks.length > 0) {
-          BlockInfo firstBlock = blocks[0];
-          Preconditions.checkState(firstBlock.isStriped());
-          b.setStripingCellSize(((BlockInfoStriped)firstBlock).getCellSize());
-        } else {
-          b.setStripingCellSize(HdfsConstants.BLOCK_STRIPED_CELL_SIZE);
-        }
-      }
-
       FileUnderConstructionFeature uc = n.getFileUnderConstructionFeature();
       if (uc != null) {
         INodeSection.FileUnderConstructionFeature f =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 2634937..21533ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -176,6 +176,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -256,7 +257,6 @@
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.RetryCache;
 import org.apache.hadoop.ipc.Server;
@@ -420,7 +420,7 @@
   private final BlockManager blockManager;
   private final SnapshotManager snapshotManager;
   private final CacheManager cacheManager;
-  private final ErasureCodingSchemaManager ecSchemaManager;
+  private final ErasureCodingPolicyManager ecPolicyManager;
   private final DatanodeStatistics datanodeStatistics;
 
   private String nameserviceId;
@@ -597,7 +597,7 @@
     leaseManager.removeAllLeases();
     snapshotManager.clearSnapshottableDirs();
     cacheManager.clear();
-    ecSchemaManager.clear();
+    ecPolicyManager.clear();
     setImageLoaded(false);
     blockManager.clear();
   }
@@ -835,7 +835,7 @@
       this.dir = new FSDirectory(this, conf);
       this.snapshotManager = new SnapshotManager(dir);
       this.cacheManager = new CacheManager(this, conf, blockManager);
-      this.ecSchemaManager = new ErasureCodingSchemaManager();
+      this.ecPolicyManager = new ErasureCodingPolicyManager();
       this.safeMode = new SafeModeInfo(conf);
       this.topConf = new TopConf(conf);
       this.auditLoggers = initAuditLoggers(conf);
@@ -3210,16 +3210,16 @@
     if (fileINode.isStriped()) {
       final ErasureCodingZone ecZone = FSDirErasureCodingOp
           .getErasureCodingZone(this, iip);
-      final ECSchema ecSchema = ecZone.getSchema();
-      final short numDataUnits = (short) ecSchema.getNumDataUnits();
-      final short numParityUnits = (short) ecSchema.getNumParityUnits();
+      final ErasureCodingPolicy ecPolicy = ecZone.getErasureCodingPolicy();
+      final short numDataUnits = (short) ecPolicy.getNumDataUnits();
+      final short numParityUnits = (short) ecPolicy.getNumParityUnits();
 
       final long numBlocks = numDataUnits + numParityUnits;
       final long fullBlockGroupSize =
           fileINode.getPreferredBlockSize() * numBlocks;
 
       final BlockInfoStriped striped = new BlockInfoStriped(commitBlock,
-          ecSchema, ecZone.getCellSize());
+          ecPolicy);
       final long actualBlockGroupSize = striped.spaceConsumed();
 
       diff = fullBlockGroupSize - actualBlockGroupSize;
@@ -6211,9 +6211,9 @@
     return cacheManager;
   }
 
-  /** @return the ErasureCodingSchemaManager. */
-  public ErasureCodingSchemaManager getErasureCodingSchemaManager() {
-    return ecSchemaManager;
+  /** @return the ErasureCodingPolicyManager. */
+  public ErasureCodingPolicyManager getErasureCodingPolicyManager() {
+    return ecPolicyManager;
   }
 
   /** @return the ErasureCodingZoneManager. */
@@ -7170,14 +7170,13 @@
    * Create an erasure coding zone on directory src.
    * @param srcArg  the path of a directory which will be the root of the
    *                erasure coding zone. The directory must be empty.
-   * @param schema  ECSchema for the erasure coding zone
-   * @param cellSize Cell size of stripe 
+   * @param ecPolicy  erasure coding policy for the erasure coding zone
    * @throws AccessControlException  if the caller is not the superuser.
    * @throws UnresolvedLinkException if the path can't be resolved.
    * @throws SafeModeException       if the Namenode is in safe mode.
    */
-  void createErasureCodingZone(final String srcArg, final ECSchema schema,
-      int cellSize, final boolean logRetryCache) throws IOException,
+  void createErasureCodingZone(final String srcArg,
+      final ErasureCodingPolicy ecPolicy, final boolean logRetryCache)
+      throws IOException,
       UnresolvedLinkException, SafeModeException, AccessControlException {
     checkSuperuserPrivilege();
     checkOperation(OperationCategory.WRITE);
@@ -7188,7 +7187,7 @@
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot create erasure coding zone on " + srcArg);
       resultingStat = FSDirErasureCodingOp.createErasureCodingZone(this,
-          srcArg, schema, cellSize, logRetryCache);
+          srcArg, ecPolicy, logRetryCache);
       success = true;
     } finally {
       writeUnlock();
@@ -7216,30 +7215,15 @@
   }
 
   /**
-   * Get available erasure coding schemas
+   * Get available erasure coding policies
    */
-  ECSchema[] getErasureCodingSchemas() throws IOException {
+  ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
     checkOperation(OperationCategory.READ);
     waitForLoadingFSImage();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      return FSDirErasureCodingOp.getErasureCodingSchemas(this);
-    } finally {
-      readUnlock();
-    }
-  }
-
-  /**
-   * Get the ECSchema specified by the name
-   */
-  ECSchema getErasureCodingSchema(String schemaName) throws IOException {
-    checkOperation(OperationCategory.READ);
-    waitForLoadingFSImage();
-    readLock();
-    try {
-      checkOperation(OperationCategory.READ);
-      return FSDirErasureCodingOp.getErasureCodingSchema(this, schemaName);
+      return FSDirErasureCodingOp.getErasureCodingPolicies(this);
     } finally {
       readUnlock();
     }
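
For reference, a minimal client-side sketch of the reworked API, assuming a configured HDFS client; it uses only methods this patch touches (DFSClient#getErasureCodingPolicies and DistributedFileSystem#createErasureCodingZone), and the name lookup mirrors what CreateECZoneCommand does further down. The zone path and the policy-name argument are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class CreateZoneSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        // Resolve a policy by the name the NameNode advertises; leaving the
        // policy null makes the NameNode fall back to the system default.
        ErasureCodingPolicy chosen = null;
        for (ErasureCodingPolicy p : dfs.getClient().getErasureCodingPolicies()) {
          if (p.getName().equals(args[0])) {
            chosen = p;
          }
        }
        Path dir = new Path("/eczone");
        dfs.mkdirs(dir); // the zone root must be an empty directory
        dfs.createErasureCodingZone(dir, chosen);
      }
    }

Note there is no cell-size argument anywhere: the cell size now rides inside the policy.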
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index bee06a5..ab29e4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -84,6 +84,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -144,7 +145,6 @@
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RetriableException;
@@ -1849,7 +1849,7 @@
   }
 
   @Override // ClientProtocol
-  public void createErasureCodingZone(String src, ECSchema schema, int cellSize)
+  public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
       throws IOException {
     checkNNStartup();
     final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
@@ -1858,8 +1858,7 @@
     }
     boolean success = false;
     try {
-      namesystem.createErasureCodingZone(src, schema, cellSize,
-          cacheEntry != null);
+      namesystem.createErasureCodingZone(src, ecPolicy, cacheEntry != null);
       success = true;
     } finally {
       RetryCache.setState(cacheEntry, success);
@@ -2063,9 +2062,9 @@
   }
 
   @Override // ClientProtocol
-  public ECSchema[] getECSchemas() throws IOException {
+  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
     checkNNStartup();
-    return namesystem.getErasureCodingSchemas();
+    return namesystem.getErasureCodingPolicies();
   }
 
   @Override // ClientProtocol
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 21062e2..46e2b98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -573,7 +573,7 @@
 
       // count expected replicas
       short targetFileReplication;
-      if (file.getECSchema() != null) {
+      if (file.getErasureCodingPolicy() != null) {
         assert storedBlock instanceof BlockInfoStriped;
         targetFileReplication = ((BlockInfoStriped) storedBlock)
             .getRealTotalBlockNum();
@@ -1159,11 +1159,11 @@
 
   @VisibleForTesting
   static class ErasureCodingResult extends Result {
-    final String defaultSchema;
+    final String defaultECPolicy;
 
     ErasureCodingResult(Configuration conf) {
-      defaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema()
-          .getSchemaName();
+      defaultECPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy()
+          .getName();
     }
 
     @Override
@@ -1240,7 +1240,7 @@
             ((float) (numMisReplicatedBlocks * 100) / (float) totalBlocks))
             .append(" %)");
       }
-      res.append("\n Default schema:\t\t").append(defaultSchema)
+      res.append("\n Default ecPolicy:\t\t").append(defaultECPolicy)
           .append("\n Average block group size:\t").append(
           getReplicationFactor()).append("\n Missing block groups:\t\t").append(
           missingIds.size()).append("\n Corrupt block groups:\t\t").append(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockECRecoveryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockECRecoveryCommand.java
index 56a1546..55ae7b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockECRecoveryCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockECRecoveryCommand.java
@@ -25,7 +25,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 import java.util.Arrays;
 import java.util.Collection;
@@ -77,31 +77,28 @@
     private String[] targetStorageIDs;
     private StorageType[] targetStorageTypes;
     private final short[] liveBlockIndices;
-    private final ECSchema ecSchema;
-    private final int cellSize;
+    private final ErasureCodingPolicy ecPolicy;
 
     public BlockECRecoveryInfo(ExtendedBlock block, DatanodeInfo[] sources,
         DatanodeStorageInfo[] targetDnStorageInfo, short[] liveBlockIndices,
-        ECSchema ecSchema, int cellSize) {
+        ErasureCodingPolicy ecPolicy) {
       this(block, sources, DatanodeStorageInfo
           .toDatanodeInfos(targetDnStorageInfo), DatanodeStorageInfo
           .toStorageIDs(targetDnStorageInfo), DatanodeStorageInfo
-          .toStorageTypes(targetDnStorageInfo), liveBlockIndices, ecSchema,
-          cellSize);
+          .toStorageTypes(targetDnStorageInfo), liveBlockIndices, ecPolicy);
     }
 
     public BlockECRecoveryInfo(ExtendedBlock block, DatanodeInfo[] sources,
         DatanodeInfo[] targets, String[] targetStorageIDs,
         StorageType[] targetStorageTypes, short[] liveBlockIndices,
-        ECSchema ecSchema, int cellSize) {
+        ErasureCodingPolicy ecPolicy) {
       this.block = block;
       this.sources = sources;
       this.targets = targets;
       this.targetStorageIDs = targetStorageIDs;
       this.targetStorageTypes = targetStorageTypes;
       this.liveBlockIndices = liveBlockIndices;
-      this.ecSchema = ecSchema;
-      this.cellSize = cellSize;
+      this.ecPolicy = ecPolicy;
     }
 
     public ExtendedBlock getExtendedBlock() {
@@ -128,12 +125,8 @@
       return liveBlockIndices;
     }
     
-    public ECSchema getECSchema() {
-      return ecSchema;
-    }
-
-    public int getCellSize() {
-      return cellSize;
+    public ErasureCodingPolicy getErasureCodingPolicy() {
+      return ecPolicy;
     }
 
     @Override
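
A construction sketch for the slimmed-down BlockECRecoveryInfo, mirroring the TestPBHelper usage near the end of this patch; the storage IDs and types are placeholders, and the StorageType import path is assumed to be org.apache.hadoop.fs.StorageType on this branch.

    import org.apache.hadoop.fs.StorageType;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
    import org.apache.hadoop.hdfs.server.protocol.BlockECRecoveryCommand.BlockECRecoveryInfo;

    public class RecoveryInfoSketch {
      static BlockECRecoveryInfo sample() {
        DatanodeInfo[] sources = { DFSTestUtil.getLocalDatanodeInfo() };
        DatanodeInfo[] targets = { DFSTestUtil.getLocalDatanodeInfo() };
        String[] targetStorageIDs = { "storage-0" }; // placeholder
        StorageType[] targetStorageTypes = { StorageType.DEFAULT };
        short[] liveBlockIndices = { 0, 1 };
        // A single policy argument replaces the old (ECSchema, cellSize) pair;
        // recovery workers read both the layout and the cell size from it.
        ErasureCodingPolicy policy =
            ErasureCodingPolicyManager.getSystemDefaultPolicy();
        return new BlockECRecoveryInfo(new ExtendedBlock("bp1", 1234), sources,
            targets, targetStorageIDs, targetStorageTypes, liveBlockIndices,
            policy);
      }
    }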
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java
index 03026d8..f3260da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java
@@ -31,9 +31,8 @@
 import org.apache.hadoop.fs.shell.PathData;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -49,7 +48,7 @@
     factory.addClass(CreateECZoneCommand.class, "-" + CreateECZoneCommand.NAME);
     factory.addClass(GetECZoneCommand.class, "-"
         + GetECZoneCommand.NAME);
-    factory.addClass(ListECSchemas.class, "-" + ListECSchemas.NAME);
+    factory.addClass(ListPolicies.class, "-" + ListPolicies.NAME);
   }
 
   @Override
@@ -77,35 +76,24 @@
   }
 
   /**
-   * Create EC encoding zone command. Zones are created to use specific EC
-   * encoding schema, other than default while encoding the files under some
-   * specific directory.
+   * A command to create an EC zone for a path, with an erasure coding
+   * policy name.
    */
   static class CreateECZoneCommand extends ECCommand {
     public static final String NAME = "createZone";
-    public static final String USAGE = "[-s <schemaName>] [-c <cellSize>] <path>";
+    public static final String USAGE = "[-s <policyName>] <path>";
     public static final String DESCRIPTION = 
-        "Create a zone to encode files using a specified schema\n"
+        "Create a zone to encode files using a specified policy\n"
         + "Options :\n"
-        + "  -s <schemaName> : EC schema name to encode files. "
-        + "If not passed default schema will be used\n"
-        + "  -c <cellSize> : cell size to use for striped encoding files."
-        + " If not passed default cellsize of "
-        + HdfsConstants.BLOCK_STRIPED_CELL_SIZE + " will be used\n"
+        + "  -s <policyName> : erasure coding policy name to encode files. "
+        + "If not passed the default policy will be used\n"
         + "  <path>  : Path to an empty directory. Under this directory "
-        + "files will be encoded using specified schema";
-    private String schemaName;
-    private int cellSize = 0;
-    private ECSchema schema = null;
+        + "files will be encoded using specified erasure coding policy";
+    private String ecPolicyName;
+    private ErasureCodingPolicy ecPolicy = null;
 
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
-      schemaName = StringUtils.popOptionWithArgument("-s", args);
-      String cellSizeStr = StringUtils.popOptionWithArgument("-c", args);
-      if (cellSizeStr != null) {
-        cellSize = (int) StringUtils.TraditionalBinaryPrefix
-            .string2long(cellSizeStr);
-      }
+      ecPolicyName = StringUtils.popOptionWithArgument("-s", args);
       if (args.isEmpty()) {
         throw new HadoopIllegalArgumentException("<path> is missing");
       }
@@ -119,29 +107,29 @@
       super.processPath(item);
       DistributedFileSystem dfs = (DistributedFileSystem) item.fs;
       try {
-        if (schemaName != null) {
-          ECSchema[] ecSchemas = dfs.getClient().getECSchemas();
-          for (ECSchema ecSchema : ecSchemas) {
-            if (schemaName.equals(ecSchema.getSchemaName())) {
-              schema = ecSchema;
+        if (ecPolicyName != null) {
+          ErasureCodingPolicy[] ecPolicies = dfs.getClient().getErasureCodingPolicies();
+          for (ErasureCodingPolicy ecPolicy : ecPolicies) {
+            if (ecPolicyName.equals(ecPolicy.getName())) {
+              this.ecPolicy = ecPolicy;
               break;
             }
           }
-          if (schema == null) {
+          if (ecPolicy == null) {
             StringBuilder sb = new StringBuilder();
-            sb.append("Schema '");
-            sb.append(schemaName);
-            sb.append("' does not match any of the supported schemas.");
+            sb.append("Policy '");
+            sb.append(ecPolicyName);
+            sb.append("' does not match any of the supported policies.");
             sb.append(" Please select any one of ");
-            List<String> schemaNames = new ArrayList<String>();
-            for (ECSchema ecSchema : ecSchemas) {
-              schemaNames.add(ecSchema.getSchemaName());
+            List<String> ecPolicyNames = new ArrayList<String>();
+            for (ErasureCodingPolicy ecPolicy : ecPolicies) {
+              ecPolicyNames.add(ecPolicy.getName());
             }
-            sb.append(schemaNames);
+            sb.append(ecPolicyNames);
             throw new HadoopIllegalArgumentException(sb.toString());
           }
         }
-        dfs.createErasureCodingZone(item.path, schema, cellSize);
+        dfs.createErasureCodingZone(item.path, ecPolicy);
         out.println("EC Zone created successfully at " + item.path);
       } catch (IOException e) {
         throw new IOException("Unable to create EC zone for the path "
@@ -188,13 +176,13 @@
   }
 
   /**
-   * List all supported EC Schemas
+   * List all supported erasure coding policies
    */
-  static class ListECSchemas extends ECCommand {
-    public static final String NAME = "listSchemas";
+  static class ListPolicies extends ECCommand {
+    public static final String NAME = "listPolicies";
     public static final String USAGE = "";
     public static final String DESCRIPTION = 
-        "Get the list of ECSchemas supported\n";
+        "Get the list of erasure coding policies supported\n";
 
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
@@ -209,14 +197,14 @@
       }
       DistributedFileSystem dfs = (DistributedFileSystem) fs;
 
-      ECSchema[] ecSchemas = dfs.getClient().getECSchemas();
+      ErasureCodingPolicy[] ecPolicies = dfs.getClient().getErasureCodingPolicies();
       StringBuilder sb = new StringBuilder();
       int i = 0;
-      while (i < ecSchemas.length) {
-        ECSchema ecSchema = ecSchemas[i];
-        sb.append(ecSchema.getSchemaName());
+      while (i < ecPolicies.length) {
+        ErasureCodingPolicy ecPolicy = ecPolicies[i];
+        sb.append(ecPolicy.getName());
         i++;
-        if (i < ecSchemas.length) {
+        if (i < ecPolicies.length) {
           sb.append(", ");
         }
       }
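
With the option changes above, the shell usage becomes the following (assuming the existing hdfs erasurecode entry point that registers ECCommand, which this patch does not change; the policy name is illustrative, and -listPolicies prints the real set):

    hdfs erasurecode -listPolicies
    hdfs erasurecode -createZone -s RS-6-3-64k /eczone

Omitting -s applies the system default policy, and the former -c <cellSize> flag is gone entirely, since the cell size is a fixed attribute of the chosen policy.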
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
index 4dc94a0..4fded73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
@@ -31,7 +31,7 @@
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
 import org.apache.hadoop.security.token.Token;
 
@@ -318,7 +318,7 @@
   }
 
   /**
-   * Decode based on the given input buffers and schema.
+   * Decode based on the given input buffers and erasure coding policy.
    */
   public static void decodeAndFillBuffer(final byte[][] decodeInputs,
       AlignedStripe alignedStripe, int dataBlkNum, int parityBlkNum,
@@ -355,20 +355,20 @@
    * by stateful read and uses ByteBuffer as reading target buffer. Besides the
    * read range is within a single stripe thus the calculation logic is simpler.
    */
-  public static AlignedStripe[] divideOneStripe(ECSchema ecSchema,
+  public static AlignedStripe[] divideOneStripe(ErasureCodingPolicy ecPolicy,
       int cellSize, LocatedStripedBlock blockGroup, long rangeStartInBlockGroup,
       long rangeEndInBlockGroup, ByteBuffer buf) {
-    final int dataBlkNum = ecSchema.getNumDataUnits();
+    final int dataBlkNum = ecPolicy.getNumDataUnits();
     // Step 1: map the byte range to StripingCells
-    StripingCell[] cells = getStripingCellsOfByteRange(ecSchema, cellSize,
+    StripingCell[] cells = getStripingCellsOfByteRange(ecPolicy, cellSize,
         blockGroup, rangeStartInBlockGroup, rangeEndInBlockGroup);
 
     // Step 2: get the unmerged ranges on each internal block
-    VerticalRange[] ranges = getRangesForInternalBlocks(ecSchema, cellSize,
+    VerticalRange[] ranges = getRangesForInternalBlocks(ecPolicy, cellSize,
         cells);
 
     // Step 3: merge into stripes
-    AlignedStripe[] stripes = mergeRangesForInternalBlocks(ecSchema, ranges);
+    AlignedStripe[] stripes = mergeRangesForInternalBlocks(ecPolicy, ranges);
 
     // Step 4: calculate each chunk's position in destination buffer. Since the
     // whole read range is within a single stripe, the logic is simpler here.
@@ -400,7 +400,7 @@
   /**
    * This method divides a requested byte range into an array of inclusive
    * {@link AlignedStripe}.
-   * @param ecSchema The codec schema for the file, which carries the numbers
+   * @param ecPolicy The erasure coding policy for the file, which carries
+   *                 the numbers
    *                 of data / parity blocks
    * @param cellSize Cell size of stripe
    * @param blockGroup The striped block group
@@ -412,24 +412,24 @@
    * At most 5 stripes will be generated from each logical range, as
    * demonstrated in the header of {@link AlignedStripe}.
    */
-  public static AlignedStripe[] divideByteRangeIntoStripes(ECSchema ecSchema,
+  public static AlignedStripe[] divideByteRangeIntoStripes(ErasureCodingPolicy ecPolicy,
       int cellSize, LocatedStripedBlock blockGroup,
       long rangeStartInBlockGroup, long rangeEndInBlockGroup, byte[] buf,
       int offsetInBuf) {
 
     // Step 0: analyze range and calculate basic parameters
-    final int dataBlkNum = ecSchema.getNumDataUnits();
+    final int dataBlkNum = ecPolicy.getNumDataUnits();
 
     // Step 1: map the byte range to StripingCells
-    StripingCell[] cells = getStripingCellsOfByteRange(ecSchema, cellSize,
+    StripingCell[] cells = getStripingCellsOfByteRange(ecPolicy, cellSize,
         blockGroup, rangeStartInBlockGroup, rangeEndInBlockGroup);
 
     // Step 2: get the unmerged ranges on each internal block
-    VerticalRange[] ranges = getRangesForInternalBlocks(ecSchema, cellSize,
+    VerticalRange[] ranges = getRangesForInternalBlocks(ecPolicy, cellSize,
         cells);
 
     // Step 3: merge into at most 5 stripes
-    AlignedStripe[] stripes = mergeRangesForInternalBlocks(ecSchema, ranges);
+    AlignedStripe[] stripes = mergeRangesForInternalBlocks(ecPolicy, ranges);
 
     // Step 4: calculate each chunk's position in destination buffer
     calcualteChunkPositionsInBuf(cellSize, stripes, cells, buf, offsetInBuf);
@@ -446,7 +446,7 @@
    * used by {@link DFSStripedOutputStream} in encoding
    */
   @VisibleForTesting
-  private static StripingCell[] getStripingCellsOfByteRange(ECSchema ecSchema,
+  private static StripingCell[] getStripingCellsOfByteRange(ErasureCodingPolicy ecPolicy,
       int cellSize, LocatedStripedBlock blockGroup,
       long rangeStartInBlockGroup, long rangeEndInBlockGroup) {
     Preconditions.checkArgument(
@@ -461,16 +461,16 @@
     final int firstCellOffset = (int) (rangeStartInBlockGroup % cellSize);
     final int firstCellSize =
         (int) Math.min(cellSize - (rangeStartInBlockGroup % cellSize), len);
-    cells[0] = new StripingCell(ecSchema, firstCellSize, firstCellIdxInBG,
+    cells[0] = new StripingCell(ecPolicy, firstCellSize, firstCellIdxInBG,
         firstCellOffset);
     if (lastCellIdxInBG != firstCellIdxInBG) {
       final int lastCellSize = (int) (rangeEndInBlockGroup % cellSize) + 1;
-      cells[numCells - 1] = new StripingCell(ecSchema, lastCellSize,
+      cells[numCells - 1] = new StripingCell(ecPolicy, lastCellSize,
           lastCellIdxInBG, 0);
     }
 
     for (int i = 1; i < numCells - 1; i++) {
-      cells[i] = new StripingCell(ecSchema, cellSize, i + firstCellIdxInBG, 0);
+      cells[i] = new StripingCell(ecPolicy, cellSize, i + firstCellIdxInBG, 0);
     }
 
     return cells;
@@ -481,10 +481,10 @@
    * the physical byte range (inclusive) on each stored internal block.
    */
   @VisibleForTesting
-  private static VerticalRange[] getRangesForInternalBlocks(ECSchema ecSchema,
+  private static VerticalRange[] getRangesForInternalBlocks(ErasureCodingPolicy ecPolicy,
       int cellSize, StripingCell[] cells) {
-    int dataBlkNum = ecSchema.getNumDataUnits();
-    int parityBlkNum = ecSchema.getNumParityUnits();
+    int dataBlkNum = ecPolicy.getNumDataUnits();
+    int parityBlkNum = ecPolicy.getNumParityUnits();
 
     VerticalRange ranges[] = new VerticalRange[dataBlkNum + parityBlkNum];
 
@@ -521,9 +521,9 @@
    * {@link AlignedStripe} instances.
    */
   private static AlignedStripe[] mergeRangesForInternalBlocks(
-      ECSchema ecSchema, VerticalRange[] ranges) {
-    int dataBlkNum = ecSchema.getNumDataUnits();
-    int parityBlkNum = ecSchema.getNumParityUnits();
+      ErasureCodingPolicy ecPolicy, VerticalRange[] ranges) {
+    int dataBlkNum = ecPolicy.getNumDataUnits();
+    int parityBlkNum = ecPolicy.getNumParityUnits();
     List<AlignedStripe> stripes = new ArrayList<>();
     SortedSet<Long> stripePoints = new TreeSet<>();
     for (VerticalRange r : ranges) {
@@ -628,7 +628,7 @@
    */
   @VisibleForTesting
   static class StripingCell {
-    final ECSchema schema;
+    final ErasureCodingPolicy ecPolicy;
     /** Logical order in a block group, used when doing I/O to a block group */
     final int idxInBlkGroup;
     final int idxInInternalBlk;
@@ -642,13 +642,13 @@
     final int offset;
     final int size;
 
-    StripingCell(ECSchema ecSchema, int cellSize, int idxInBlkGroup,
+    StripingCell(ErasureCodingPolicy ecPolicy, int cellSize, int idxInBlkGroup,
         int offset) {
-      this.schema = ecSchema;
+      this.ecPolicy = ecPolicy;
       this.idxInBlkGroup = idxInBlkGroup;
-      this.idxInInternalBlk = idxInBlkGroup / ecSchema.getNumDataUnits();
+      this.idxInInternalBlk = idxInBlkGroup / ecPolicy.getNumDataUnits();
       this.idxInStripe = idxInBlkGroup -
-          this.idxInInternalBlk * ecSchema.getNumDataUnits();
+          this.idxInInternalBlk * ecPolicy.getNumDataUnits();
       this.offset = offset;
       this.size = cellSize;
     }
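
A worked check of the StripingCell index arithmetic above, as a standalone sketch; locate() is a hypothetical helper that mirrors the constructor math, and the 6-data-unit figure assumes the system default RS (6, 3) layout.

    public class StripingCellMath {
      // Mirrors the arithmetic in StripingCell's constructor.
      static int[] locate(int idxInBlkGroup, int numDataUnits) {
        int idxInInternalBlk = idxInBlkGroup / numDataUnits;
        int idxInStripe = idxInBlkGroup - idxInInternalBlk * numDataUnits;
        return new int[] { idxInInternalBlk, idxInStripe };
      }

      public static void main(String[] args) {
        // Logical cell 8 of a 6-data-unit group lands on the internal block
        // at stripe index 2, as that block's second cell (idxInInternalBlk = 1).
        int[] loc = locate(8, 6);
        System.out.println(loc[0] + " " + loc[1]); // prints: 1 2
      }
    }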
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
index 3233f66..43a1fc5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
@@ -141,7 +141,6 @@
     optional XAttrFeatureProto xAttrs = 9;
     optional uint32 storagePolicyID = 10;
     optional bool isStriped = 11;
-    optional uint64 stripingCellSize = 12;
   }
 
   message QuotaByStorageTypeEntryProto {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 3f0d6df..7d895e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -1900,7 +1900,7 @@
       assert dir != null;
       dfs.mkdirs(dir);
       try {
-        dfs.getClient().createErasureCodingZone(dir.toString(), null, 0);
+        dfs.getClient().createErasureCodingZone(dir.toString(), null);
       } catch (IOException e) {
         if (!e.getMessage().contains("non-empty directory")) {
           throw e;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index c68bd28..5e55c34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -257,12 +257,12 @@
     Mockito.doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
                 (short) 777), "owner", "group", new byte[0], new byte[0],
-                1010, 0, null, (byte) 0, null, 0)).when(mockNN).getFileInfo(anyString());
+                1010, 0, null, (byte) 0, null)).when(mockNN).getFileInfo(anyString());
     
     Mockito.doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
                 (short) 777), "owner", "group", new byte[0], new byte[0],
-                1010, 0, null, (byte) 0, null, 0))
+                1010, 0, null, (byte) 0, null))
         .when(mockNN)
         .create(anyString(), (FsPermission) anyObject(), anyString(),
             (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
@@ -550,7 +550,7 @@
       badBlocks.add(badLocatedBlock);
       return new LocatedBlocks(goodBlockList.getFileLength(), false,
                                badBlocks, null, true,
-                               null, null, 0);
+                               null, null);
     }
   }
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
index baf6106..ed3c110 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
@@ -35,10 +35,10 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
 import org.junit.After;
 import org.junit.Assert;
@@ -59,7 +59,7 @@
   private DistributedFileSystem fs;
   private final Path dirPath = new Path("/striped");
   private Path filePath = new Path(dirPath, "file");
-  private final ECSchema schema = ErasureCodingSchemaManager.getSystemDefaultSchema();
+  private final ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
   private final short DATA_BLK_NUM = HdfsConstants.NUM_DATA_BLOCKS;
   private final short PARITY_BLK_NUM = HdfsConstants.NUM_PARITY_BLOCKS;
   private final int CELLSIZE = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
@@ -79,7 +79,7 @@
     }
     fs = cluster.getFileSystem();
     fs.mkdirs(dirPath);
-    fs.getClient().createErasureCodingZone(dirPath.toString(), null, CELLSIZE);
+    fs.getClient().createErasureCodingZone(dirPath.toString(), null);
   }
 
   @After
@@ -100,7 +100,7 @@
     LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
         filePath.toString(), 0, BLOCK_GROUP_SIZE * numBlocks);
     final DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(),
-        filePath.toString(), false, schema, CELLSIZE, null);
+        filePath.toString(), false, ecPolicy, null);
 
     List<LocatedBlock> lbList = lbs.getLocatedBlocks();
     for (LocatedBlock aLbList : lbList) {
@@ -152,7 +152,7 @@
       }
     }
     DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(),
-        filePath.toString(), false, schema, CELLSIZE, null);
+        filePath.toString(), false, ecPolicy, null);
 
     int[] startOffsets = {0, 1, CELLSIZE - 102, CELLSIZE, CELLSIZE + 102,
         CELLSIZE*DATA_BLK_NUM, CELLSIZE*DATA_BLK_NUM + 102,
@@ -194,7 +194,7 @@
     }
     DFSStripedInputStream in =
         new DFSStripedInputStream(fs.getClient(), filePath.toString(), false,
-            ErasureCodingSchemaManager.getSystemDefaultSchema(), CELLSIZE, null);
+            ErasureCodingPolicyManager.getSystemDefaultPolicy(), null);
     int readSize = BLOCK_GROUP_SIZE;
     byte[] readBuffer = new byte[readSize];
     byte[] expected = new byte[readSize];
@@ -292,7 +292,7 @@
 
     DFSStripedInputStream in =
         new DFSStripedInputStream(fs.getClient(), filePath.toString(),
-            false, schema, CELLSIZE, null);
+            false, ecPolicy, null);
 
     byte[] expected = new byte[fileSize];
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
index 3f40dee..5cab978 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
@@ -68,7 +68,7 @@
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
-    cluster.getFileSystem().getClient().createErasureCodingZone("/", null, 0);
+    cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
     fs = cluster.getFileSystem();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index 6594ae1..fed9f16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -79,7 +79,7 @@
     cluster.waitActive();
     dfs = cluster.getFileSystem();
     dfs.mkdirs(dir);
-    dfs.createErasureCodingZone(dir, null, 0);
+    dfs.createErasureCodingZone(dir, null);
   }
 
   private void tearDown() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index 4233a1c..6a24685 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -110,7 +110,7 @@
     l2.setCorrupt(true);
 
     List<LocatedBlock> ls = Arrays.asList(l1, l2);
-    LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null, null, 0);
+    LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null, null);
 
     BlockLocation[] bs = DFSUtilClient.locatedBlocks2Locations(lbs);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
index 9cdb763..989e9fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
@@ -81,7 +81,7 @@
   public void testDataDirectories() throws IOException {
     File dataDir = new File(BASE_DIR, "data").getCanonicalFile();
     Configuration conf = cluster.getConfiguration(0);
-    // 1. Test unsupported schema. Only "file:" is supported.
+    // 1. Test an unsupported URI scheme. Only "file:" is supported.
     String dnDir = makeURI("shv", null, fileAsURI(dataDir).getPath());
     conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dnDir);
     DataNode dn = null;
@@ -97,7 +97,7 @@
     }
     assertNull("Data-node startup should have failed.", dn);
 
-    // 2. Test "file:" schema and no schema (path-only). Both should work.
+    // 2. Test "file:" ecPolicy and no ecPolicy (path-only). Both should work.
     String dnDir1 = fileAsURI(dataDir).toString() + "1";
     String dnDir2 = makeURI("file", "localhost",
                     fileAsURI(dataDir).getPath() + "2");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestECSchemas.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestECSchemas.java
deleted file mode 100644
index 88198c9..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestECSchemas.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import static org.junit.Assert.*;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
-import org.apache.hadoop.io.erasurecode.ECSchema;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class TestECSchemas {
-  private MiniDFSCluster cluster;
-
-  @Before
-  public void before() throws IOException {
-    cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(0)
-        .build();
-    cluster.waitActive();
-  }
-
-  @After
-  public void after() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testGetECSchemas() throws Exception {
-    ECSchema[] ecSchemas = cluster.getFileSystem().getClient().getECSchemas();
-    assertNotNull(ecSchemas);
-    assertTrue("Should have at least one schema", ecSchemas.length > 0);
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index 8724ed5..64daeb3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -743,7 +743,7 @@
             version, new byte[suite.getAlgorithmBlockSize()],
             new byte[suite.getAlgorithmBlockSize()],
             "fakeKey", "fakeVersion"),
-            (byte) 0, null, 0))
+            (byte) 0, null))
         .when(mcp)
         .create(anyString(), (FsPermission) anyObject(), anyString(),
             (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java
index 1a10ebf..a878501 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java
@@ -22,10 +22,10 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -65,7 +65,7 @@
     fs.mkdir(testDir, FsPermission.getDirDefault());
 
     /* Normal creation of an erasure coding zone */
-    fs.getClient().createErasureCodingZone(testDir.toString(), null, 0);
+    fs.getClient().createErasureCodingZone(testDir.toString(), null);
 
     /* Verify files under the zone are striped */
     final Path ECFilePath = new Path(testDir, "foo");
@@ -78,7 +78,7 @@
     fs.mkdir(notEmpty, FsPermission.getDirDefault());
     fs.create(new Path(notEmpty, "foo"));
     try {
-      fs.getClient().createErasureCodingZone(notEmpty.toString(), null, 0);
+      fs.getClient().createErasureCodingZone(notEmpty.toString(), null);
       fail("Erasure coding zone on non-empty dir");
     } catch (IOException e) {
       assertExceptionContains("erasure coding zone for a non-empty directory", e);
@@ -88,10 +88,10 @@
     final Path zone1 = new Path("/zone1");
     final Path zone2 = new Path(zone1, "zone2");
     fs.mkdir(zone1, FsPermission.getDirDefault());
-    fs.getClient().createErasureCodingZone(zone1.toString(), null, 0);
+    fs.getClient().createErasureCodingZone(zone1.toString(), null);
     fs.mkdir(zone2, FsPermission.getDirDefault());
     try {
-      fs.getClient().createErasureCodingZone(zone2.toString(), null, 0);
+      fs.getClient().createErasureCodingZone(zone2.toString(), null);
       fail("Nested erasure coding zones");
     } catch (IOException e) {
       assertExceptionContains("already in an erasure coding zone", e);
@@ -101,7 +101,7 @@
     final Path fPath = new Path("/file");
     fs.create(fPath);
     try {
-      fs.getClient().createErasureCodingZone(fPath.toString(), null, 0);
+      fs.getClient().createErasureCodingZone(fPath.toString(), null);
       fail("Erasure coding zone on file");
     } catch (IOException e) {
       assertExceptionContains("erasure coding zone for a file", e);
@@ -114,8 +114,8 @@
     final Path dstECDir = new Path("/dstEC");
     fs.mkdir(srcECDir, FsPermission.getDirDefault());
     fs.mkdir(dstECDir, FsPermission.getDirDefault());
-    fs.getClient().createErasureCodingZone(srcECDir.toString(), null, 0);
-    fs.getClient().createErasureCodingZone(dstECDir.toString(), null, 0);
+    fs.getClient().createErasureCodingZone(srcECDir.toString(), null);
+    fs.getClient().createErasureCodingZone(dstECDir.toString(), null);
     final Path srcFile = new Path(srcECDir, "foo");
     fs.create(srcFile);
 
@@ -160,7 +160,7 @@
   public void testReplication() throws IOException {
     final Path testDir = new Path("/ec");
     fs.mkdir(testDir, FsPermission.getDirDefault());
-    fs.createErasureCodingZone(testDir, null, 0);
+    fs.createErasureCodingZone(testDir, null);
     final Path fooFile = new Path(testDir, "foo");
     // create ec file with replication=0
     fs.create(fooFile, FsPermission.getFileDefault(), true,
@@ -177,47 +177,47 @@
   }
 
   @Test
-  public void testGetErasureCodingInfoWithSystemDefaultSchema() throws Exception {
+  public void testGetErasureCodingInfoWithSystemDefaultECPolicy() throws Exception {
     String src = "/ec";
     final Path ecDir = new Path(src);
     fs.mkdir(ecDir, FsPermission.getDirDefault());
     // dir ECInfo before creating ec zone
-    assertNull(fs.getClient().getFileInfo(src).getECSchema());
+    assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy());
     // dir ECInfo after creating ec zone
-    fs.getClient().createErasureCodingZone(src, null, 0); //Default one will be used.
-    ECSchema sysDefaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
-    verifyErasureCodingInfo(src, sysDefaultSchema);
+    fs.getClient().createErasureCodingZone(src, null); // Default policy will be used.
+    ErasureCodingPolicy sysDefaultECPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
+    verifyErasureCodingInfo(src, sysDefaultECPolicy);
     fs.create(new Path(ecDir, "child1")).close();
     // verify for the files in ec zone
-    verifyErasureCodingInfo(src + "/child1", sysDefaultSchema);
+    verifyErasureCodingInfo(src + "/child1", sysDefaultECPolicy);
   }
 
   @Test
   public void testGetErasureCodingInfo() throws Exception {
-    ECSchema[] sysSchemas = ErasureCodingSchemaManager.getSystemSchemas();
-    assertTrue("System schemas should be of only 1 for now",
-        sysSchemas.length == 1);
+    ErasureCodingPolicy[] sysECPolicies = ErasureCodingPolicyManager.getSystemPolices();
+    assertTrue("System ecPolicies should be of only 1 for now",
+        sysECPolicies.length == 1);
 
-    ECSchema usingSchema = sysSchemas[0];
+    ErasureCodingPolicy usingECPolicy = sysECPolicies[0];
     String src = "/ec2";
     final Path ecDir = new Path(src);
     fs.mkdir(ecDir, FsPermission.getDirDefault());
     // dir ECInfo before creating ec zone
-    assertNull(fs.getClient().getFileInfo(src).getECSchema());
+    assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy());
     // dir ECInfo after creating ec zone
-    fs.getClient().createErasureCodingZone(src, usingSchema, 0);
-    verifyErasureCodingInfo(src, usingSchema);
+    fs.getClient().createErasureCodingZone(src, usingECPolicy);
+    verifyErasureCodingInfo(src, usingECPolicy);
     fs.create(new Path(ecDir, "child1")).close();
     // verify for the files in ec zone
-    verifyErasureCodingInfo(src + "/child1", usingSchema);
+    verifyErasureCodingInfo(src + "/child1", usingECPolicy);
   }
 
   private void verifyErasureCodingInfo(
-      String src, ECSchema usingSchema) throws IOException {
+      String src, ErasureCodingPolicy usingECPolicy) throws IOException {
     HdfsFileStatus hdfsFileStatus = fs.getClient().getFileInfo(src);
-    ECSchema schema = hdfsFileStatus.getECSchema();
-    assertNotNull(schema);
-    assertEquals("Actually used schema should be equal with target schema",
-        usingSchema, schema);
+    ErasureCodingPolicy ecPolicy = hdfsFileStatus.getErasureCodingPolicy();
+    assertNotNull(ecPolicy);
+    assertEquals("Actually used ecPolicy should be equal with target ecPolicy",
+        usingECPolicy, ecPolicy);
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
new file mode 100644
index 0000000..4610ced
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
@@ -0,0 +1,65 @@
+package org.apache.hadoop.hdfs;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestFileStatusWithECPolicy {
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem fs;
+  private DFSClient client;
+
+  @Before
+  public void before() throws IOException {
+    cluster =
+        new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    client = fs.getClient();
+  }
+
+  @After
+  public void after() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testFileStatusWithECPolicy() throws Exception {
+    // test directory not in EC zone
+    final Path dir = new Path("/foo");
+    assertTrue(fs.mkdir(dir, FsPermission.getDirDefault()));
+    assertNull(client.getFileInfo(dir.toString()).getErasureCodingPolicy());
+    // test file not in EC zone
+    final Path file = new Path(dir, "foo");
+    fs.create(file).close();
+    assertNull(client.getFileInfo(file.toString()).getErasureCodingPolicy());
+    fs.delete(file, true);
+
+    final ErasureCodingPolicy ecPolicy1 = ErasureCodingPolicyManager.getSystemDefaultPolicy();
+    // create EC zone on dir
+    fs.createErasureCodingZone(dir, ecPolicy1);
+    final ErasureCodingPolicy ecPolicy2 = client.getFileInfo(dir.toUri().getPath()).getErasureCodingPolicy();
+    assertNotNull(ecPolicy2);
+    assertTrue(ecPolicy1.equals(ecPolicy2));
+
+    // test file in EC zone
+    fs.create(file).close();
+    final ErasureCodingPolicy ecPolicy3 =
+        fs.getClient().getFileInfo(file.toUri().getPath()).getErasureCodingPolicy();
+    assertNotNull(ecPolicy3);
+    assertTrue(ecPolicy1.equals(ecPolicy3));
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECschema.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECschema.java
deleted file mode 100644
index 3c400b7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECschema.java
+++ /dev/null
@@ -1,65 +0,0 @@
-package org.apache.hadoop.hdfs;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
-import org.apache.hadoop.io.erasurecode.ECSchema;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class TestFileStatusWithECschema {
-  private MiniDFSCluster cluster;
-  private DistributedFileSystem fs;
-  private DFSClient client;
-
-  @Before
-  public void before() throws IOException {
-    cluster =
-        new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
-    cluster.waitActive();
-    fs = cluster.getFileSystem();
-    client = fs.getClient();
-  }
-
-  @After
-  public void after() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testFileStatusWithECschema() throws Exception {
-    // test directory not in EC zone
-    final Path dir = new Path("/foo");
-    assertTrue(fs.mkdir(dir, FsPermission.getDirDefault()));
-    assertNull(client.getFileInfo(dir.toString()).getECSchema());
-    // test file not in EC zone
-    final Path file = new Path(dir, "foo");
-    fs.create(file).close();
-    assertNull(client.getFileInfo(file.toString()).getECSchema());
-    fs.delete(file, true);
-
-    final ECSchema schema1 = ErasureCodingSchemaManager.getSystemDefaultSchema();
-    // create EC zone on dir
-    fs.createErasureCodingZone(dir, schema1, 0);
-    final ECSchema schame2 = client.getFileInfo(dir.toUri().getPath()).getECSchema();
-    assertNotNull(schame2);
-    assertTrue(schema1.equals(schame2));
-
-    // test file in EC zone
-    fs.create(file).close();
-    final ECSchema schame3 =
-        fs.getClient().getFileInfo(file.toUri().getPath()).getECSchema();
-    assertNotNull(schame3);
-    assertTrue(schema1.equals(schame3));
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
index d0cd335..b77ff3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
@@ -354,12 +354,12 @@
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
             (short) 777), "owner", "group", new byte[0], new byte[0],
-            1010, 0, null, (byte) 0, null, 0)).when(mcp).getFileInfo(anyString());
+            1010, 0, null, (byte) 0, null)).when(mcp).getFileInfo(anyString());
     Mockito
         .doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
                 (short) 777), "owner", "group", new byte[0], new byte[0],
-                1010, 0, null, (byte) 0, null, 0))
+                1010, 0, null, (byte) 0, null))
         .when(mcp)
         .create(anyString(), (FsPermission) anyObject(), anyString(),
             (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
index 1719d3f..cb2ec11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
@@ -64,8 +64,7 @@
   public void setup() throws IOException {
     cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
         .numDataNodes(numDNs).build();
-    cluster.getFileSystem().getClient().createErasureCodingZone("/",
-        null, cellSize);
+    cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
     fs = cluster.getFileSystem();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
index 4c2438d..38256ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
@@ -52,8 +52,7 @@
   public void setup() throws IOException {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
-    cluster.getFileSystem().getClient().createErasureCodingZone("/",
-        null, cellSize);
+    cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
     fs = cluster.getFileSystem();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java
index 9285fd7..5c17359 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java
@@ -78,7 +78,7 @@
     cluster.waitActive();
     
     fs = cluster.getFileSystem();
-    fs.getClient().createErasureCodingZone("/", null, 0);
+    fs.getClient().createErasureCodingZone("/", null);
 
     List<DataNode> datanodes = cluster.getDataNodes();
     for (int i = 0; i < dnNum; i++) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
index 6f0bc71..f577ddb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
@@ -54,8 +54,7 @@
     conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
-    cluster.getFileSystem().getClient().createErasureCodingZone("/",
-        null, cellSize);
+    cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
     cluster.waitActive();
 
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
index 089a134..810edb2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
@@ -57,8 +57,7 @@
   public void setup() throws IOException {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
-    cluster.getFileSystem().getClient().createErasureCodingZone("/",
-        null, cellSize);
+    cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
     fs = cluster.getFileSystem();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
index 3679c5f..8584823 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
@@ -48,8 +48,7 @@
   public void setup() throws IOException {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
-    cluster.getFileSystem().getClient().createErasureCodingZone("/",
-        null, cellSize);
+    cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
     fs = cluster.getFileSystem();
   }
 
@@ -159,4 +158,4 @@
       throw new IOException("Failed at i=" + i, e);
     }
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
index 3675e63..6942ac8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
@@ -71,7 +71,7 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockECRecoveryCommand.BlockECRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
@@ -88,7 +88,7 @@
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
@@ -682,8 +682,7 @@
     short[] liveBlkIndices0 = new short[2];
     BlockECRecoveryInfo blkECRecoveryInfo0 = new BlockECRecoveryInfo(
         new ExtendedBlock("bp1", 1234), dnInfos0, targetDnInfos0,
-        liveBlkIndices0, ErasureCodingSchemaManager.getSystemDefaultSchema(),
-        64 * 1024);
+        liveBlkIndices0, ErasureCodingPolicyManager.getSystemDefaultPolicy());
     DatanodeInfo[] dnInfos1 = new DatanodeInfo[] {
         DFSTestUtil.getLocalDatanodeInfo(), DFSTestUtil.getLocalDatanodeInfo() };
     DatanodeStorageInfo targetDnInfos_2 = BlockManagerTestUtil
@@ -697,8 +696,7 @@
     short[] liveBlkIndices1 = new short[2];
     BlockECRecoveryInfo blkECRecoveryInfo1 = new BlockECRecoveryInfo(
         new ExtendedBlock("bp2", 3256), dnInfos1, targetDnInfos1,
-        liveBlkIndices1, ErasureCodingSchemaManager.getSystemDefaultSchema(),
-        64 * 1024);
+        liveBlkIndices1, ErasureCodingPolicyManager.getSystemDefaultPolicy());
     List<BlockECRecoveryInfo> blkRecoveryInfosList = new ArrayList<BlockECRecoveryInfo>();
     blkRecoveryInfosList.add(blkECRecoveryInfo0);
     blkRecoveryInfosList.add(blkECRecoveryInfo1);
@@ -740,18 +738,18 @@
       assertEquals(liveBlockIndices1[i], liveBlockIndices2[i]);
     }
     
-    ECSchema ecSchema1 = blkECRecoveryInfo1.getECSchema();
-    ECSchema ecSchema2 = blkECRecoveryInfo2.getECSchema();
-    // Compare ECSchemas same as default ECSchema as we used system default
-    // ECSchema used in this test
-    compareECSchemas(ErasureCodingSchemaManager.getSystemDefaultSchema(), ecSchema1);
-    compareECSchemas(ErasureCodingSchemaManager.getSystemDefaultSchema(), ecSchema2);
+    ErasureCodingPolicy ecPolicy1 = blkECRecoveryInfo1.getErasureCodingPolicy();
+    ErasureCodingPolicy ecPolicy2 = blkECRecoveryInfo2.getErasureCodingPolicy();
+    // Both should match the system default ECPolicy, since that is what
+    // this test used
+    compareECPolicies(ErasureCodingPolicyManager.getSystemDefaultPolicy(), ecPolicy1);
+    compareECPolicies(ErasureCodingPolicyManager.getSystemDefaultPolicy(), ecPolicy2);
   }
 
-  private void compareECSchemas(ECSchema ecSchema1, ECSchema ecSchema2) {
-    assertEquals(ecSchema1.getSchemaName(), ecSchema2.getSchemaName());
-    assertEquals(ecSchema1.getNumDataUnits(), ecSchema2.getNumDataUnits());
-    assertEquals(ecSchema1.getNumParityUnits(), ecSchema2.getNumParityUnits());
+  private void compareECPolicies(ErasureCodingPolicy ecPolicy1, ErasureCodingPolicy ecPolicy2) {
+    assertEquals(ecPolicy1.getName(), ecPolicy2.getName());
+    assertEquals(ecPolicy1.getNumDataUnits(), ecPolicy2.getNumDataUnits());
+    assertEquals(ecPolicy1.getNumParityUnits(), ecPolicy2.getNumParityUnits());
   }
 
   private void assertDnInfosEqual(DatanodeInfo[] dnInfos1,
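
BlockECRecoveryInfo loses the same trailing cell-size argument; the policy is now the last parameter and carries the cell size itself. A sketch of the new constructor shape, with placeholder names standing in for the datanode fixtures built in this test:

    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
    import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
    import org.apache.hadoop.hdfs.server.protocol.BlockECRecoveryCommand.BlockECRecoveryInfo;

    class RecoveryInfoSketch {
      static BlockECRecoveryInfo newInfo(DatanodeInfo[] srcs,
          DatanodeStorageInfo[] targets, short[] liveIndices) {
        // Formerly the call ended in (..., schema, 64 * 1024); the policy
        // now replaces the (schema, cellSize) pair.
        return new BlockECRecoveryInfo(new ExtendedBlock("bp1", 1234),
            srcs, targets, liveIndices,
            ErasureCodingPolicyManager.getSystemDefaultPolicy());
      }
    }
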
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 3c149d0..169027c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -1581,7 +1581,7 @@
       cluster.waitActive();
       client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
           ClientProtocol.class).getProxy();
-      client.createErasureCodingZone("/", null, 0);
+      client.createErasureCodingZone("/", null);
 
       long totalCapacity = sum(capacities);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
index 6788770..1b23600 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
@@ -19,10 +19,9 @@
 
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.internal.util.reflection.Whitebox;
@@ -45,11 +44,10 @@
   private static final int TOTAL_NUM_BLOCKS = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
   private static final long BASE_ID = -1600;
   private static final Block baseBlock = new Block(BASE_ID);
-  private static final ECSchema testSchema
-      = ErasureCodingSchemaManager.getSystemDefaultSchema();
-  private static final int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
+  private static final ErasureCodingPolicy testECPolicy
+      = ErasureCodingPolicyManager.getSystemDefaultPolicy();
   private final BlockInfoStriped info = new BlockInfoStriped(baseBlock,
-      testSchema, cellSize);
+      testECPolicy);
 
   private Block[] createReportedBlocks(int num) {
     Block[] blocks = new Block[num];
@@ -237,7 +235,7 @@
     ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
     DataOutput out = new DataOutputStream(byteStream);
     BlockInfoStriped blk = new BlockInfoStriped(new Block(blkID, numBytes,
-        generationStamp), testSchema, cellSize);
+        generationStamp), testECPolicy);
 
     try {
       blk.write(out);
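
BlockInfoStriped follows the same pattern: one policy argument, with the cell size read from the policy rather than passed alongside it. A sketch under that assumption:

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
    import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

    class StripedBlockSketch {
      static BlockInfoStriped newBlock(long id) {
        ErasureCodingPolicy policy =
            ErasureCodingPolicyManager.getSystemDefaultPolicy();
        // The former standalone cellSize constant is gone; when a test needs
        // the value, policy.getCellSize() is the source of truth.
        return new BlockInfoStriped(new Block(id), policy);
      }
    }
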
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
index f985f54..2202b34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
@@ -56,7 +56,7 @@
     conf = getConf();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster.getFileSystem().getClient()
-        .createErasureCodingZone("/", null, cellSize);
+        .createErasureCodingZone("/", null);
     try {
       cluster.waitActive();
       doTestRead(conf, cluster, true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
index 2f2356f..2e084fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
@@ -86,7 +86,7 @@
         .getBlockGroupIdGenerator();
     fs.mkdirs(eczone);
     cluster.getFileSystem().getClient()
-        .createErasureCodingZone("/eczone", null, cellSize);
+        .createErasureCodingZone("/eczone", null);
   }
 
   @After
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlockQueues.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlockQueues.java
index 0f419ef..7cd2e19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlockQueues.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlockQueues.java
@@ -19,9 +19,8 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
@@ -31,17 +30,15 @@
 
 public class TestUnderReplicatedBlockQueues {
 
-  private final ECSchema ecSchema =
-      ErasureCodingSchemaManager.getSystemDefaultSchema();
-  private final int CELLSIZE = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
 
   private BlockInfo genBlockInfo(long id) {
     return new BlockInfoContiguous(new Block(id), (short) 3);
   }
 
   private BlockInfo genStripedBlockInfo(long id, long numBytes) {
-    BlockInfoStriped sblk =  new BlockInfoStriped(new Block(id), ecSchema,
-        CELLSIZE);
+    BlockInfoStriped sblk =  new BlockInfoStriped(new Block(id), ecPolicy);
     sblk.setNumBytes(numBytes);
     return sblk;
   }
@@ -101,8 +98,8 @@
 
   @Test
   public void testStripedBlockPriorities() throws Throwable {
-    int dataBlkNum = ecSchema.getNumDataUnits();
-    int parityBlkNUm = ecSchema.getNumParityUnits();
+    int dataBlkNum = ecPolicy.getNumDataUnits();
+    int parityBlkNum = ecPolicy.getNumParityUnits();
     doTestStripedBlockPriorities(1, parityBlkNum);
     doTestStripedBlockPriorities(dataBlkNum, parityBlkNum);
   }
@@ -110,7 +107,7 @@
   private void doTestStripedBlockPriorities(int dataBlkNum, int parityBlkNum)
       throws Throwable {
     int groupSize = dataBlkNum + parityBlkNum;
-    long numBytes = CELLSIZE * dataBlkNum;
+    long numBytes = ecPolicy.getCellSize() * dataBlkNum;
     UnderReplicatedBlocks queues = new UnderReplicatedBlocks();
 
     // add a striped block which has only NUM_DATA_BLOCKS internal blocks left
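
With the local CELLSIZE constant removed, byte counts are derived from the policy. For the default RS-6-3-64k policy the arithmetic works out as below (a worked sketch, not test code):

    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

    class StripeMathSketch {
      public static void main(String[] args) {
        ErasureCodingPolicy p =
            ErasureCodingPolicyManager.getSystemDefaultPolicy();
        // RS-6-3-64k: 6 data units x 65536-byte cells = 393216 bytes/stripe.
        long fullStripeBytes = (long) p.getCellSize() * p.getNumDataUnits();
        System.out.println(fullStripeBytes);
      }
    }
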
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
index 5487548..bf99e59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
@@ -473,7 +473,7 @@
       client.setStoragePolicy(barDir,
           HdfsConstants.HOT_STORAGE_POLICY_NAME);
       // set "/bar" directory with EC zone.
-      client.createErasureCodingZone(barDir, null, 0);
+      client.createErasureCodingZone(barDir, null);
 
       // write file to barDir
       final String fooFile = "/bar/foo";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
index 337911d0..7d06a9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
@@ -70,7 +70,7 @@
     cluster.waitActive();
     fs = cluster.getFileSystem();
     fs.mkdirs(dirPath);
-    fs.getClient().createErasureCodingZone(dirPath.toString(), null, CELLSIZE);
+    fs.getClient().createErasureCodingZone(dirPath.toString(), null);
   }
 
   @After
@@ -180,7 +180,7 @@
     long groupId = bg.getBlock().getBlockId();
     Block blk = new Block(groupId, BLOCK_SIZE, gs);
     BlockInfoStriped blockInfo = new BlockInfoStriped(blk,
-        ErasureCodingSchemaManager.getSystemDefaultSchema(), CELLSIZE);
+        ErasureCodingPolicyManager.getSystemDefaultPolicy());
     for (int i = 0; i < GROUP_SIZE; i++) {
       blk.setBlockId(groupId + i);
       cluster.injectBlocks(i, Arrays.asList(blk), bpid);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
index b8aac71..be14975 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
@@ -75,7 +75,7 @@
         .numDataNodes(GROUP_SIZE).build();
     cluster.waitActive();
     dfs = cluster.getFileSystem();
-    dfs.getClient().createErasureCodingZone("/", null, 0);
+    dfs.getClient().createErasureCodingZone("/", null);
   }
 
   @After
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index 6426b23..694411f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -45,7 +45,7 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -57,7 +57,6 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
 import org.junit.Test;
@@ -76,8 +75,8 @@
 
   private static final int NUM_DATA_NODES = 0;
 
-  private static final ECSchema testSchema
-      = ErasureCodingSchemaManager.getSystemDefaultSchema();
+  private static final ErasureCodingPolicy testECPolicy
+      = ErasureCodingPolicyManager.getSystemDefaultPolicy();
   
   @Test
   public void testDisplayRecentEditLogOpCodes() throws IOException {
@@ -450,11 +449,10 @@
       long timestamp = 1426222918;
       short blockNum = HdfsConstants.NUM_DATA_BLOCKS;
       short parityNum = HdfsConstants.NUM_PARITY_BLOCKS;
-      int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
 
       //set the storage policy of the directory
       fs.mkdir(new Path(testDir), new FsPermission("755"));
-      fs.getClient().getNamenode().createErasureCodingZone(testDir, null, 0);
+      fs.getClient().getNamenode().createErasureCodingZone(testDir, null);
 
       // Create a file with striped block
       Path p = new Path(testFilePath);
@@ -466,7 +464,7 @@
 
       // Add a striped block to the file
       BlockInfoStriped stripedBlk = new BlockInfoStriped(
-          new Block(blkId, blkNumBytes, timestamp), testSchema, cellSize);
+          new Block(blkId, blkNumBytes, timestamp), testECPolicy);
       INodeFile file = (INodeFile)fns.getFSDirectory().getINode(testFilePath);
       file.toUnderConstruction(clientName, clientMachine);
       file.addBlock(stripedBlk);
@@ -491,7 +489,6 @@
       assertEquals(timestamp, blks[0].getGenerationStamp());
       assertEquals(blockNum, ((BlockInfoStriped)blks[0]).getDataBlockNum());
       assertEquals(parityNum, ((BlockInfoStriped)blks[0]).getParityBlockNum());
-      assertEquals(cellSize, ((BlockInfoStriped)blks[0]).getCellSize());
 
       cluster.shutdown();
       cluster = null;
@@ -524,17 +521,16 @@
       long timestamp = 1426222918;
       short blockNum = HdfsConstants.NUM_DATA_BLOCKS;
       short parityNum = HdfsConstants.NUM_PARITY_BLOCKS;
-      int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
 
       //set the storage policy of the directory
       fs.mkdir(new Path(testDir), new FsPermission("755"));
-      fs.getClient().getNamenode().createErasureCodingZone(testDir, null, 0);
+      fs.getClient().getNamenode().createErasureCodingZone(testDir, null);
 
       //create a file with striped blocks
       Path p = new Path(testFilePath);
       DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
       BlockInfoStriped stripedBlk = new BlockInfoStriped(
-          new Block(blkId, blkNumBytes, timestamp), testSchema, cellSize);
+          new Block(blkId, blkNumBytes, timestamp), testECPolicy);
       INodeFile file = (INodeFile)fns.getFSDirectory().getINode(testFilePath);
       file.toUnderConstruction(clientName, clientMachine);
       file.addBlock(stripedBlk);
@@ -573,7 +569,6 @@
       assertEquals(newTimestamp, blks[0].getGenerationStamp());
       assertEquals(blockNum, ((BlockInfoStriped)blks[0]).getDataBlockNum());
       assertEquals(parityNum, ((BlockInfoStriped)blks[0]).getParityBlockNum());
-      assertEquals(cellSize, ((BlockInfoStriped)blks[0]).getCellSize());
 
       cluster.shutdown();
       cluster = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index 6b1249b..60a1f16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -34,11 +34,11 @@
 import java.util.EnumSet;
 
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.junit.Assert;
 
 import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -70,9 +70,8 @@
 
   private static final String HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ =
       "image-with-zero-block-size.tar.gz";
-  private static final ECSchema testSchema
-      = ErasureCodingSchemaManager.getSystemDefaultSchema();
-  private static final int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
+  private static final ErasureCodingPolicy testECPolicy
+      = ErasureCodingPolicyManager.getSystemDefaultPolicy();
 
   @Test
   public void testPersist() throws IOException {
@@ -182,7 +181,7 @@
   private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration conf,
                                                boolean isUC) throws IOException{
     // construct an INode with a striped block for saving and loading
-    fsn.createErasureCodingZone("/", null, 0, false);
+    fsn.createErasureCodingZone("/", null, false);
     long id = 123456789;
     byte[] name = "testSaveAndLoadInodeFile_testfile".getBytes();
     PermissionStatus permissionStatus = new PermissionStatus("testuser_a",
@@ -203,7 +202,7 @@
     for (int i = 0; i < stripedBlks.length; i++) {
       stripedBlks[i] = new BlockInfoStriped(
               new Block(stripedBlkId + i, preferredBlockSize, timestamp),
-              testSchema, cellSize);
+              testECPolicy);
       file.addBlock(stripedBlks[i]);
     }
 
@@ -427,7 +426,7 @@
           .build();
       cluster.waitActive();
       DistributedFileSystem fs = cluster.getFileSystem();
-      fs.getClient().getNamenode().createErasureCodingZone("/", null, 0);
+      fs.getClient().getNamenode().createErasureCodingZone("/", null);
       Path file = new Path("/striped");
       FSDataOutputStream out = fs.create(file);
       byte[] bytes = DFSTestUtil.generateSequentialBytes(0, BLOCK_SIZE);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index c988464..1bf0f5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -1202,7 +1202,7 @@
 
     HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
         blockSize, modTime, accessTime, perms, owner, group, symlink,
-        path, fileId, numChildren, null, storagePolicy, null, 0);
+        path, fileId, numChildren, null, storagePolicy, null);
     Result replRes = new ReplicationResult(conf);
     Result ecRes = new ErasureCodingResult(conf);
 
@@ -1644,8 +1644,8 @@
       final long precision = 1L;
       conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
       conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-      int totalSize = ErasureCodingSchemaManager.getSystemDefaultSchema().getNumDataUnits()
-                      + ErasureCodingSchemaManager.getSystemDefaultSchema().getNumParityUnits();
+      int totalSize = ErasureCodingPolicyManager.getSystemDefaultPolicy().getNumDataUnits()
+                      + ErasureCodingPolicyManager.getSystemDefaultPolicy().getNumParityUnits();
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(totalSize).build();
       fs = cluster.getFileSystem();
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
index f81005f..55c0d5b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
@@ -26,10 +26,10 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -43,10 +43,10 @@
 public class TestQuotaWithStripedBlocks {
   private static final int BLOCK_SIZE = 1024 * 1024;
   private static final long DISK_QUOTA = BLOCK_SIZE * 10;
-  private static final ECSchema ecSchema =
-      ErasureCodingSchemaManager.getSystemDefaultSchema();
-  private static final int NUM_DATA_BLOCKS = ecSchema.getNumDataUnits();
-  private static final int NUM_PARITY_BLOCKS = ecSchema.getNumParityUnits();
+  private static final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private static final int NUM_DATA_BLOCKS = ecPolicy.getNumDataUnits();
+  private static final int NUM_PARITY_BLOCKS = ecPolicy.getNumParityUnits();
   private static final int GROUP_SIZE = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
   private static final Path ecDir = new Path("/ec");
 
@@ -65,7 +65,7 @@
     dfs = cluster.getFileSystem();
 
     dfs.mkdirs(ecDir);
-    dfs.getClient().createErasureCodingZone(ecDir.toString(), ecSchema, 0);
+    dfs.getClient().createErasureCodingZone(ecDir.toString(), ecPolicy);
     dfs.setQuota(ecDir, Long.MAX_VALUE - 1, DISK_QUOTA);
     dfs.setQuotaByStorageType(ecDir, StorageType.DISK, DISK_QUOTA);
     dfs.setStoragePolicy(ecDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
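
Where a test used to pass an explicit ECSchema plus a cell size, it now hands over the policy object alone. A sketch of this zone-with-explicit-policy pattern, assuming the patched DFSClient API (dfs and the directory path correspond to the fixtures in the surrounding setup):

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

    class ExplicitPolicyZoneSketch {
      static void makeZone(DistributedFileSystem dfs, Path dir)
          throws IOException {
        ErasureCodingPolicy ecPolicy =
            ErasureCodingPolicyManager.getSystemDefaultPolicy();
        dfs.mkdirs(dir);
        // Formerly: createErasureCodingZone(dir.toString(), ecSchema, 0)
        dfs.getClient().createErasureCodingZone(dir.toString(), ecPolicy);
      }
    }
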
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
index 2182700..8b1a11f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
@@ -35,13 +35,13 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 
 import org.junit.Test;
 
@@ -59,9 +59,8 @@
   private final BlockStoragePolicy defaultPolicy =
       defaultSuite.getDefaultPolicy();
 
-  private static final ECSchema testSchema
-      = ErasureCodingSchemaManager.getSystemDefaultSchema();
-  private static final int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
+  private static final ErasureCodingPolicy testECPolicy
+      = ErasureCodingPolicyManager.getSystemDefaultPolicy();
 
   private static INodeFile createStripedINodeFile() {
     return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
@@ -79,7 +78,7 @@
   public void testBlockStripedTotalBlockCount() {
     Block blk = new Block(1);
     BlockInfoStriped blockInfoStriped
-        = new BlockInfoStriped(blk, testSchema, cellSize);
+        = new BlockInfoStriped(blk, testECPolicy);
     assertEquals(9, blockInfoStriped.getTotalBlockNum());
   }
 
@@ -89,7 +88,7 @@
     INodeFile inf = createStripedINodeFile();
     Block blk = new Block(1);
     BlockInfoStriped blockInfoStriped
-        = new BlockInfoStriped(blk, testSchema, cellSize);
+        = new BlockInfoStriped(blk, testECPolicy);
     inf.addBlock(blockInfoStriped);
     assertEquals(1, inf.getBlocks().length);
   }
@@ -100,7 +99,7 @@
     INodeFile inf = createStripedINodeFile();
     Block blk = new Block(1);
     BlockInfoStriped blockInfoStriped
-        = new BlockInfoStriped(blk, testSchema, cellSize);
+        = new BlockInfoStriped(blk, testECPolicy);
     blockInfoStriped.setNumBytes(1);
     inf.addBlock(blockInfoStriped);
     //   0. Calculate the total bytes per stripes <Num Bytes per Stripes>
@@ -125,11 +124,11 @@
     INodeFile inf = createStripedINodeFile();
     Block blk1 = new Block(1);
     BlockInfoStriped blockInfoStriped1
-        = new BlockInfoStriped(blk1, testSchema, cellSize);
+        = new BlockInfoStriped(blk1, testECPolicy);
     blockInfoStriped1.setNumBytes(1);
     Block blk2 = new Block(2);
     BlockInfoStriped blockInfoStriped2
-        = new BlockInfoStriped(blk2, testSchema, cellSize);
+        = new BlockInfoStriped(blk2, testECPolicy);
     blockInfoStriped2.setNumBytes(1);
     inf.addBlock(blockInfoStriped1);
     inf.addBlock(blockInfoStriped2);
@@ -144,7 +143,7 @@
     INodeFile inf = createStripedINodeFile();
     Block blk = new Block(1);
     BlockInfoStriped blockInfoStriped
-        = new BlockInfoStriped(blk, testSchema, cellSize);
+        = new BlockInfoStriped(blk, testECPolicy);
     blockInfoStriped.setNumBytes(100);
     inf.addBlock(blockInfoStriped);
     // Compute file size should return actual data
@@ -158,10 +157,10 @@
       throws IOException, InterruptedException {
     INodeFile inf = createStripedINodeFile();
     Block blk = new Block(1);
-    BlockInfoStripedUnderConstruction bInfoStripedUC
-        = new BlockInfoStripedUnderConstruction(blk, testSchema, cellSize);
-    bInfoStripedUC.setNumBytes(100);
-    inf.addBlock(bInfoStripedUC);
+    BlockInfoStripedUnderConstruction bInfoUCStriped
+        = new BlockInfoStripedUnderConstruction(blk, testECPolicy);
+    bInfoUCStriped.setNumBytes(100);
+    inf.addBlock(bInfoUCStriped);
     assertEquals(100, inf.computeFileSize());
     assertEquals(0, inf.computeFileSize(false, false));
   }
@@ -172,7 +171,7 @@
     INodeFile inf = createStripedINodeFile();
     Block blk = new Block(1);
     BlockInfoStriped blockInfoStriped
-        = new BlockInfoStriped(blk, testSchema, cellSize);
+        = new BlockInfoStriped(blk, testECPolicy);
     blockInfoStriped.setNumBytes(100);
     inf.addBlock(blockInfoStriped);
 
@@ -192,10 +191,10 @@
       throws IOException, InterruptedException {
     INodeFile inf = createStripedINodeFile();
     Block blk = new Block(1);
-    BlockInfoStripedUnderConstruction bInfoStripedUC
-        = new BlockInfoStripedUnderConstruction(blk, testSchema, cellSize);
-    bInfoStripedUC.setNumBytes(100);
-    inf.addBlock(bInfoStripedUC);
+    BlockInfoStripedUnderConstruction bInfoUCStriped
+        = new BlockInfoStripedUnderConstruction(blk, testECPolicy);
+    bInfoUCStriped.setNumBytes(100);
+    inf.addBlock(bInfoUCStriped);
 
     QuotaCounts counts
         = inf.computeQuotaUsageWithStriped(defaultPolicy,
@@ -235,7 +234,7 @@
       dfs.mkdirs(zone);
 
       // create erasure zone
-      dfs.createErasureCodingZone(zone, null, 0);
+      dfs.createErasureCodingZone(zone, null);
       DFSTestUtil.createFile(dfs, zoneFile, len, (short) 1, 0xFEED);
       DFSTestUtil.createFile(dfs, contiguousFile, len, (short) 1, 0xFEED);
       final FSDirectory fsd = fsn.getFSDirectory();
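
Under-construction striped blocks change the same way, and the size-accounting assertions above still hold. A condensed fragment of that pairing (inf and testECPolicy as defined in this test):

    BlockInfoStripedUnderConstruction uc =
        new BlockInfoStripedUnderConstruction(new Block(1), testECPolicy);
    uc.setNumBytes(100);
    inf.addBlock(uc);
    assertEquals(100, inf.computeFileSize());           // UC bytes included
    assertEquals(0, inf.computeFileSize(false, false)); // UC bytes excluded
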
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
index f4efbcf..7bfaab6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
@@ -60,7 +60,7 @@
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster.waitActive();
-    cluster.getFileSystem().getClient().createErasureCodingZone("/", null, 0);
+    cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
     fs = cluster.getFileSystem();
     Path eczone = new Path("/eczone");
     fs.mkdirs(eczone);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
index 5d85073..e61ac07 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
@@ -29,8 +29,8 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
 import static org.apache.hadoop.hdfs.util.StripedBlockUtil.*;
 
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -84,8 +84,8 @@
   private final int FULL_STRIPE_SIZE = DATA_BLK_NUM * CELLSIZE;
   /** number of full stripes in a full block group */
   private final int BLK_GROUP_STRIPE_NUM = 16;
-  private final ECSchema SCEHMA = ErasureCodingSchemaManager.
-      getSystemDefaultSchema();
+  private final ErasureCodingPolicy ECPOLICY = ErasureCodingPolicyManager.
+      getSystemDefaultPolicy();
   private final Random random = new Random();
 
   private int[] blockGroupSizes;
@@ -152,7 +152,7 @@
     int done = 0;
     while (done < bgSize) {
       Preconditions.checkState(done % CELLSIZE == 0);
-      StripingCell cell = new StripingCell(SCEHMA, CELLSIZE, done / CELLSIZE, 0);
+      StripingCell cell = new StripingCell(ECPOLICY, CELLSIZE, done / CELLSIZE, 0);
       int idxInStripe = cell.idxInStripe;
       int size = Math.min(CELLSIZE, bgSize - done);
       for (int i = 0; i < size; i++) {
@@ -245,7 +245,7 @@
           if (brStart + brSize > bgSize) {
             continue;
           }
-          AlignedStripe[] stripes = divideByteRangeIntoStripes(SCEHMA,
+          AlignedStripe[] stripes = divideByteRangeIntoStripes(ECPOLICY,
               CELLSIZE, blockGroup, brStart, brStart + brSize - 1, assembled, 0);
 
           for (AlignedStripe stripe : stripes) {
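
Note that the striping utilities still take an explicit cell size next to the policy in this patch; only the callers' source of that value changed elsewhere. The read-path call exercised above, restated with its parameters labeled (fixture names as in this test):

    // divideByteRangeIntoStripes(policy, cellSize, blockGroup, rangeStart,
    //     rangeEndInclusive, buffer, bufferOffset)
    AlignedStripe[] stripes = StripedBlockUtil.divideByteRangeIntoStripes(
        ECPOLICY, CELLSIZE, blockGroup, brStart, brStart + brSize - 1,
        assembled, 0);
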
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index 303d063..8947c5b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -65,7 +65,7 @@
     final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
         now, now + 10, new FsPermission((short) 0644), "user", "group",
         DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
-        HdfsConstants.GRANDFATHER_INODE_ID, 0, null, (byte) 0, null, 0);
+        HdfsConstants.GRANDFATHER_INODE_ID, 0, null, (byte) 0, null);
     final FileStatus fstatus = toFileStatus(status, parent);
     System.out.println("status  = " + status);
     System.out.println("fstatus = " + fstatus);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index 70020d5..5e60658 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -57,11 +57,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^[ \t]*Create a zone to encode files using a specified schema( )*</expected-output>
+          <expected-output>^[ \t]*Create a zone to encode files using a specified policy( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-createZone \[-s &lt;schemaName&gt;\] \[-c &lt;cellSize&gt;\] &lt;path&gt;(.)*</expected-output>
+          <expected-output>^-createZone \[-s &lt;policyName&gt;\] &lt;path&gt;(.)*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -86,20 +86,20 @@
     </test>
 
     <test>
-      <description>help: listSchemas command</description>
+      <description>help: listPolicies command</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -help listSchemas</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -help listPolicies</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Get the list of ECSchemas supported</expected-output>
+          <expected-output>Get the list of erasure coding policies supported</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-listSchemas (.)*</expected-output>
+          <expected-output>^-listPolicies (.)*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -109,7 +109,7 @@
       <description>createZone : create a zone to encode files</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /eczone</command>
-        <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3 /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3-64k /eczone</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /eczone</command>
@@ -141,7 +141,7 @@
     </test>
 
     <test>
-      <description>createZone : default schema</description>
+      <description>createZone : default policy</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /eczone</command>
         <ec-admin-command>-fs NAMENODE -createZone /eczone</ec-admin-command>
@@ -153,7 +153,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Dir: /eczone, Schema: ECSchema=[Name=RS-6-3</expected-output>
+          <expected-output>Dir: /eczone, Policy: ErasureCodingPolicy=[Name=RS-6-3-64k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -179,7 +179,7 @@
       <description>getZone : get information about the EC zone at specified path</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /eczone</command>
-        <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3 /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3-64k /eczone</ec-admin-command>
         <ec-admin-command>-fs NAMENODE -getZone /eczone</ec-admin-command>
       </test-commands>
       <cleanup-commands>
@@ -188,7 +188,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Dir: /eczone, Schema: ECSchema=[Name=RS-6-3</expected-output>
+          <expected-output>Dir: /eczone, Policy: ErasureCodingPolicy=[Name=RS-6-3-64k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -197,7 +197,7 @@
       <description>getZone : get EC zone at specified file path</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /eczone</command>
-        <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3 /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3-64k /eczone</ec-admin-command>
         <command>-fs NAMENODE -touchz /eczone/ecfile</command>
         <ec-admin-command>-fs NAMENODE -getZone /eczone/ecfile</ec-admin-command>
       </test-commands>
@@ -208,15 +208,15 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Dir: /eczone, Schema: ECSchema=[Name=RS-6-3</expected-output>
+          <expected-output>Dir: /eczone, Policy: ErasureCodingPolicy=[Name=RS-6-3-64k</expected-output>
         </comparator>
       </comparators>
     </test>
 
     <test>
-      <description>listSchemas : get the list of ECSchemas supported</description>
+      <description>listPolicies : get the list of ECPolicies supported</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -listSchemas</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -listPolicies</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
@@ -247,7 +247,7 @@
     </test>
 
     <test>
-      <description>createZone : illegal parameters - schema name is missing</description>
+      <description>createZone : illegal parameters - policy name is missing</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /eczone</command>
         <ec-admin-command>-fs NAMENODE -createZone -s</ec-admin-command>
@@ -281,10 +281,10 @@
     </test>
 
     <test>
-      <description>createZone : illegal parameters - invalidschema</description>
+      <description>createZone : illegal parameters - invalidpolicy</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /eczone</command>
-        <ec-admin-command>-fs NAMENODE -createZone -s invalidschema /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -createZone -s invalidpolicy /eczone</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /eczone</command>
@@ -292,7 +292,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Schema 'invalidschema' does not match any of the supported schemas. Please select any one of [RS-6-3]</expected-output>
+          <expected-output>Policy 'invalidpolicy' does not match any of the supported policies. Please select any one of [RS-6-3-64k]</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -359,16 +359,16 @@
     </test>
 
     <test>
-      <description>listSchemas : illegal parameters - too many parameters</description>
+      <description>listPolicies : illegal parameters - too many parameters</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -listSchemas /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -listPolicies /eczone</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>-listSchemas: Too many parameters</expected-output>
+          <expected-output>-listPolicies: Too many parameters</expected-output>
         </comparator>
       </comparators>
     </test>
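
For reference, the renamed admin commands these cases exercise, as a hypothetical shell session (assuming the hdfs erasurecode entry point behind the ec-admin-command elements above; RS-6-3-64k is the lone built-in policy):

    hdfs erasurecode -listPolicies
    hdfs erasurecode -createZone -s RS-6-3-64k /eczone
    hdfs erasurecode -getZone /eczone
    # expected: Dir: /eczone, Policy: ErasureCodingPolicy=[Name=RS-6-3-64k, ...]
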