[CARBONDATA-3889] Cleanup typo code for carbondata-core module

Why is this PR needed?
There are many typos in the carbondata-core module.

What changes were proposed in this PR?
Clean up typos in the carbondata-core module: comments, identifiers, and file names.

Does this PR introduce any user interface change?
No

Is any new testcase added?
No

This closes #3828
diff --git a/core/pom.xml b/core/pom.xml
index 5cf11a7..bf9a90f 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -147,7 +147,7 @@
             <phase>generate-resources</phase>
             <!-- Execute the shell script to generate the CarbonData build information. -->
             <configuration>
-              <executable>${project.basedir}/../build/carbondata-build-info${script.exetension}</executable>
+              <executable>${project.basedir}/../build/carbondata-build-info${script.extension}</executable>
               <arguments>
                 <argument>${project.build.directory}/extra-resources</argument>
                 <argument>${project.version}</argument>
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/Cacheable.java b/core/src/main/java/org/apache/carbondata/core/cache/Cacheable.java
index e348890..2efa3ad 100644
--- a/core/src/main/java/org/apache/carbondata/core/cache/Cacheable.java
+++ b/core/src/main/java/org/apache/carbondata/core/cache/Cacheable.java
@@ -40,7 +40,7 @@
 
   /**
    * Method to be used for invalidating the cacheable object. API to be invoked at the time of
-   * removing the cacheable object from memory. Example at the of removing the cachebale object
+   * removing the cacheable object from memory, e.g. when removing the cacheable object
    * from LRU cache
    */
   void invalidate();
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/CarbonLRUCache.java b/core/src/main/java/org/apache/carbondata/core/cache/CarbonLRUCache.java
index 3b19425..0759798 100644
--- a/core/src/main/java/org/apache/carbondata/core/cache/CarbonLRUCache.java
+++ b/core/src/main/java/org/apache/carbondata/core/cache/CarbonLRUCache.java
@@ -282,7 +282,7 @@
   }
 
   /**
-   * This method will check if size is available to laod dictionary into memory
+   * This method will check if size is available to load dictionary into memory
    *
    * @param requiredSize
    * @return
@@ -306,8 +306,8 @@
    */
   public void clear() {
     synchronized (expiringMap) {
-      for (Cacheable cachebleObj : expiringMap.values()) {
-        cachebleObj.invalidate();
+      for (Cacheable cacheable : expiringMap.values()) {
+        cacheable.invalidate();
       }
       expiringMap.clear();
     }
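
The loop above shows the standard teardown order for a cache of Cacheable entries:
invalidate each entry (so resources such as off-heap memory are released) before
dropping the references. A minimal sketch of the same pattern, assuming only the
Cacheable interface shown earlier; this is an illustration, not the real class:

import java.util.LinkedHashMap;
import java.util.Map;

final class TinyInvalidatingCache {
  // access-order LinkedHashMap gives LRU iteration order
  private final Map<String, Cacheable> expiringMap = new LinkedHashMap<>(16, 0.75f, true);

  public void clear() {
    synchronized (expiringMap) {
      for (Cacheable cacheable : expiringMap.values()) {
        cacheable.invalidate();  // release held resources first
      }
      expiringMap.clear();       // then drop the references
    }
  }
}
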
diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonLoadOptionConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonLoadOptionConstants.java
index 3a8aa3d..5950611 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonLoadOptionConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonLoadOptionConstants.java
@@ -144,7 +144,7 @@
   public static final String ENABLE_CARBON_LOAD_DIRECT_WRITE_TO_STORE_PATH_DEFAULT = "false";
 
   /**
-   * If the sort memory is insufficient, spill inmemory pages to disk.
+   * If the sort memory is insufficient, spill in-memory pages to disk.
    * The total amount of pages is at most the specified percentage of total sort memory. Default
    * value 0 means that no pages will be spilled and the newly incoming pages will be spilled,
    * whereas value 100 means that all pages will be spilled and newly incoming pages will be loaded
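
Since the comment above defines the percentage semantics precisely, a hedged sketch of
the resulting page budget may help; the method name is hypothetical, not a CarbonData API:

// Illustrative: how many in-memory sort pages may be spilled to disk.
// 0 keeps every page resident; 100 makes every page eligible to spill.
static int spillablePageCount(int totalInMemoryPages, int spillPercentage) {
  int pct = Math.max(0, Math.min(100, spillPercentage));  // clamp to [0, 100]
  return totalInMemoryPages * pct / 100;
}
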
diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonV3DataFormatConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonV3DataFormatConstants.java
index 1dbf470..a170b8a 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonV3DataFormatConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonV3DataFormatConstants.java
@@ -49,7 +49,7 @@
   /**
    * number of column to be read in one IO in query default value
    */
-  String NUMBER_OF_COLUMN_TO_READ_IN_IO_DEFAULTVALUE = "10";
+  String NUMBER_OF_COLUMN_TO_READ_IN_IO_DEFAULT_VALUE = "10";
 
   /**
    * number of column to be read in one IO in query max value
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/FileReader.java b/core/src/main/java/org/apache/carbondata/core/datastore/FileReader.java
index df249dd..36fa20f 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/FileReader.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/FileReader.java
@@ -56,7 +56,7 @@
   byte[] readByteArray(String filePath, int length) throws IOException;
 
   /**
-   * This method will be used to read int from file from postion(offset), here
+   * This method will be used to read int from file from position(offset), here
   * length will be always 4 because int byte size is 4
    *
    * @param filePath fully qualified file path
@@ -66,7 +66,7 @@
   int readInt(String filePath, long offset) throws IOException;
 
   /**
-   * This method will be used to read long from file from postion(offset), here
+   * This method will be used to read long from file from position(offset), here
   * length will be always 8 because long byte size is 8
    *
    * @param filePath fully qualified file path
@@ -76,7 +76,7 @@
   long readLong(String filePath, long offset) throws IOException;
 
   /**
-   * This method will be used to read int from file from postion(offset), here
+   * This method will be used to read int from file from position(offset), here
   * length will be always 4 because int byte size is 4
    *
    * @param filePath fully qualified file path
@@ -85,7 +85,7 @@
   int readInt(String filePath) throws IOException;
 
   /**
-   * This method will be used to read long value from file from postion(offset), here
+   * This method will be used to read long value from file from position(offset), here
   * length will be always 8 because long byte size is 8
    *
    * @param filePath fully qualified file path
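
The contract documented in these javadocs (read a fixed-width primitive at a byte
offset) can be sketched with plain JDK I/O. This illustrates the contract only and is
not the CarbonData implementation:

import java.io.IOException;
import java.io.RandomAccessFile;

// Sketch: read a 4-byte int at the given position (offset) of a file.
static int readIntAt(String filePath, long offset) throws IOException {
  try (RandomAccessFile file = new RandomAccessFile(filePath, "r")) {
    file.seek(offset);      // jump to the requested position
    return file.readInt();  // always consumes exactly 4 bytes
  }
}
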
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/ReusableDataBuffer.java b/core/src/main/java/org/apache/carbondata/core/datastore/ReusableDataBuffer.java
index d0add0c..63372fc 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/ReusableDataBuffer.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/ReusableDataBuffer.java
@@ -44,7 +44,7 @@
    * If requested size is less it will return same buffer, if size is more
    * it resize the buffer and return
    * @param requestedSize
-   * @return databuffer
+   * @return dataBuffer
    */
   public byte[] getDataBuffer(int requestedSize) {
     if (dataBuffer == null || requestedSize > size) {
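
getDataBuffer follows a reuse-or-grow pattern: hand back the existing array when it is
big enough, otherwise reallocate. A self-contained sketch; the growth headroom below is
an assumption for illustration, the real class may size differently:

final class GrowOnDemandBuffer {
  private byte[] dataBuffer;
  private int size;

  byte[] getDataBuffer(int requestedSize) {
    if (dataBuffer == null || requestedSize > size) {
      // headroom so that slightly larger requests do not reallocate every call
      size = requestedSize + requestedSize / 2;
      dataBuffer = new byte[size];
    }
    return dataBuffer;  // callers must use only the first requestedSize bytes
  }
}
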
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/TableSpec.java b/core/src/main/java/org/apache/carbondata/core/datastore/TableSpec.java
index ae6507c..fe98e96 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/TableSpec.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/TableSpec.java
@@ -159,10 +159,10 @@
     this.dictDimActualPosition = new int[dictSortDimSpec.size()];
     this.noDictDimActualPosition = new int[noDictSortDimSpec.size()];
     for (int i = 0; i < dictDimActualPosition.length; i++) {
-      dictDimActualPosition[i] = dictSortDimSpec.get(i).getActualPostion();
+      dictDimActualPosition[i] = dictSortDimSpec.get(i).getActualPosition();
     }
     for (int i = 0; i < noDictDimActualPosition.length; i++) {
-      noDictDimActualPosition[i] = noDictSortDimSpec.get(i).getActualPostion();
+      noDictDimActualPosition[i] = noDictSortDimSpec.get(i).getActualPosition();
     }
     isUpdateNoDictDims = !noDictSortDimSpec.equals(noDictionaryDimensionSpec);
     isUpdateDictDim = !dictSortDimSpec.equals(dictDimensionSpec);
@@ -342,13 +342,13 @@
     // indicate whether this dimension need to do inverted index
     private boolean doInvertedIndex;
 
-    // indicate the actual postion in blocklet
-    private short actualPostion;
-    DimensionSpec(ColumnType columnType, CarbonDimension dimension, short actualPostion) {
+    // indicate the actual position in blocklet
+    private short actualPosition;
+    DimensionSpec(ColumnType columnType, CarbonDimension dimension, short actualPosition) {
       super(dimension.getColName(), dimension.getDataType(), columnType);
       this.inSortColumns = dimension.isSortColumn();
       this.doInvertedIndex = dimension.isUseInvertedIndex();
-      this.actualPostion = actualPostion;
+      this.actualPosition = actualPosition;
     }
 
     public boolean isInSortColumns() {
@@ -359,8 +359,8 @@
       return doInvertedIndex;
     }
 
-    public short getActualPostion() {
-      return actualPostion;
+    public short getActualPosition() {
+      return actualPosition;
     }
 
     @Override
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/block/AbstractIndex.java b/core/src/main/java/org/apache/carbondata/core/datastore/block/AbstractIndex.java
index c93e162..3f9d310 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/block/AbstractIndex.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/block/AbstractIndex.java
@@ -52,8 +52,7 @@
   private long deleteDeltaTimestamp;
 
   /**
-   * map of blockletidAndPageId to
-   * deleted rows
+   * map of blockletIdAndPageId to deleted rows
    */
   private Map<String, DeleteDeltaVo> deletedRowsMap;
   /**
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentProperties.java b/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentProperties.java
index 25540a7..1d291d2 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentProperties.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentProperties.java
@@ -50,19 +50,19 @@
   private static final Logger LOG =
         LogServiceFactory.getLogService(SegmentProperties.class.getName());
 
-  // When calcuting the fingerpinter of all columns. In order to
-  // identify dimension columns with other column. The fingerprinter
-  // of dimensions will leftshift 1 bit
+  // When calculating the finger printer of all columns, in order to
+  // distinguish dimension columns from other columns, the finger printer
+  // of dimensions will be shifted right by 1 bit
   private static final int DIMENSIONS_FINGER_PRINTER_SHIFT = 1;
 
-  // When calcuting the fingerpinter of all columns. In order to
-  // identify measure columns with other column. The fingerprinter
-  // of measures will leftshift 2 bit
+  // When calculating the finger printer of all columns, in order to
+  // distinguish measure columns from other columns, the finger printer
+  // of measures will be shifted right by 2 bits
   private static final int MEASURES_FINGER_PRINTER_SHIFT = 2;
 
-  // When calcuting the fingerpinter of all columns. In order to
-  // identify complex columns with other column. The fingerprinter
-  // of complex columns will leftshift 3 bit
+  // When calculating the finger printer of all columns, in order to
+  // distinguish complex columns from other columns, the finger printer
+  // of complex columns will be shifted right by 3 bits
   private static final int COMPLEX_FINGER_PRINTER_SHIFT = 3;
 
   /**
@@ -89,7 +89,7 @@
    * a block can have multiple columns. This will have block index as key
    * and all dimension participated in that block as values
    */
-  private Map<Integer, Set<Integer>> blockTodimensionOrdinalMapping;
+  private Map<Integer, Set<Integer>> blockToDimensionOrdinalMapping;
 
   /**
    * mapping of measure ordinal in schema to column chunk index in the data file
@@ -113,23 +113,23 @@
   private int lastDimensionColOrdinal;
 
   /**
-   * The fingerprinter is the xor result of all the columns in table.
-   * Besides, in the case of two segmentproperties have same columns
-   * but different sortcolumn, n like there is a column exists in both
-   * segmentproperties, but is dimension in one segmentproperties,
+   * The finger printer is the xor result of all the columns in the table.
+   * Besides, in the case where two segment properties have the same columns
+   * but different sort columns, e.g. a column that exists in both
+   * segment properties but is a dimension in one of them,
    * but is a measure in the other. In order to identify the difference
-   * of these two segmentproperties. The xor result of all dimension
-   * will leftshift 1 bit, the xor results of all measures will leftshift
-   * 2bit, and the xor results of all complex columns will leftshift 3 bits
-   * Sum up, the Formula of generate fingerprinter is
+   * of these two segment properties, the xor result of all dimensions
+   * is shifted right by 1 bit, the xor result of all measures is shifted
+   * right by 2 bits, and the xor result of all complex columns is shifted
+   * right by 3 bits. To sum up, the formula for generating the finger printer is
    *
-   * fingerprinter = (dimensionfingerprinter >> 1)
-   * ^ (measurefingerprinter >> 1) ^ (complexfingerprinter >> 1)
-   * dimensionsfingerprinter = dimension1 ^ dimension2 ^ ...
-   * measuresfingerprinter = measure1 ^ measure2 ^ measure3 ...
-   * complexfingerprinter = complex1 ^ complex2 ^ complex3 ...
+   * fingerPrinter = (dimensionFingerPrinter >> 1)
+   * ^ (measureFingerPrinter >> 2) ^ (complexFingerPrinter >> 3)
+   * dimensionsFingerPrinter = dimension1 ^ dimension2 ^ ...
+   * measuresFingerPrinter = measure1 ^ measure2 ^ measure3 ...
+   * complexFingerPrinter = complex1 ^ complex2 ^ complex3 ...
    */
-  private long fingerprinter = Long.MAX_VALUE;
+  private long fingerPrinter = Long.MAX_VALUE;
 
   public SegmentProperties(List<ColumnSchema> columnsInTable) {
     dimensions = new ArrayList<CarbonDimension>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
@@ -139,7 +139,7 @@
     fillDimensionAndMeasureDetails(columnsInTable);
     dimensionOrdinalToChunkMapping =
         new HashMap<Integer, Integer>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    blockTodimensionOrdinalMapping =
+    blockToDimensionOrdinalMapping =
         new HashMap<Integer, Set<Integer>>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
     measuresOrdinalToChunkMapping =
         new HashMap<Integer, Integer>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
@@ -177,33 +177,33 @@
     Iterator<Entry<Integer, Integer>> blockItr = blocks.iterator();
     while (blockItr.hasNext()) {
       Entry<Integer, Integer> block = blockItr.next();
-      Set<Integer> dimensionOrdinals = blockTodimensionOrdinalMapping.get(block.getValue());
+      Set<Integer> dimensionOrdinals = blockToDimensionOrdinalMapping.get(block.getValue());
       if (dimensionOrdinals == null) {
         dimensionOrdinals = new HashSet<Integer>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-        blockTodimensionOrdinalMapping.put(block.getValue(), dimensionOrdinals);
+        blockToDimensionOrdinalMapping.put(block.getValue(), dimensionOrdinals);
       }
       dimensionOrdinals.add(block.getKey());
     }
   }
 
   /**
-   * compare the segmentproperties based on fingerprinter
+   * compare the segment properties based on the finger printer
    */
   @Override
   public boolean equals(Object obj) {
     if (!(obj instanceof SegmentProperties)) {
       return false;
     }
-    // If these two segmentproperties have different number of columns
+    // If these two segment properties have a different number of columns
     // Return false directly
     SegmentProperties segmentProperties = (SegmentProperties) obj;
     if (this.getNumberOfColumns() != segmentProperties.getNumberOfColumns()) {
       return false;
     }
-    // Compare the fingerprinter
-    return getFingerprinter() != Long.MIN_VALUE &&
-            segmentProperties.getFingerprinter() != Long.MIN_VALUE &&
-            (getFingerprinter() == segmentProperties.getFingerprinter());
+    // Compare the finger printer
+    return getFingerPrinter() != Long.MIN_VALUE &&
+            segmentProperties.getFingerPrinter() != Long.MIN_VALUE &&
+            (getFingerPrinter() == segmentProperties.getFingerPrinter());
   }
 
   @Override
@@ -248,25 +248,25 @@
   }
 
   /**
-   * fingerprinter = (dimensionfingerprinter >> 1)
-   *   ^ (measurefingerprinter >> 1) ^ (complexfingerprinter >> 1)
-   * dimensionsfingerprinter = dimension1 ^ dimension2 ^ ...
-   * measuresfingerprinter = measure1 ^ measure2 ^ measure3 ...
-   * complexfingerprinter = complex1 ^ complex2 ^ complex3 ...
+   * fingerPrinter = (dimensionFingerPrinter >> 1)
+   *   ^ (measureFingerPrinter >> 2) ^ (complexFingerPrinter >> 3)
+   * dimensionsFingerPrinter = dimension1 ^ dimension2 ^ ...
+   * measuresFingerPrinter = measure1 ^ measure2 ^ measure3 ...
+   * complexFingerPrinter = complex1 ^ complex2 ^ complex3 ...
    */
-  protected long getFingerprinter() {
-    if (this.fingerprinter == Long.MAX_VALUE) {
+  protected long getFingerPrinter() {
+    if (this.fingerPrinter == Long.MAX_VALUE) {
       long dimensionsFingerPrinter = getFingerprinter(this.dimensions.stream()
               .map(t -> t.getColumnSchema()).collect(Collectors.toList()));
       long measuresFingerPrinter = getFingerprinter(this.measures.stream()
               .map(t -> t.getColumnSchema()).collect(Collectors.toList()));
       long complexFingerPrinter = getFingerprinter(this.complexDimensions.stream()
               .map(t -> t.getColumnSchema()).collect(Collectors.toList()));
-      this.fingerprinter = (dimensionsFingerPrinter >> DIMENSIONS_FINGER_PRINTER_SHIFT)
+      this.fingerPrinter = (dimensionsFingerPrinter >> DIMENSIONS_FINGER_PRINTER_SHIFT)
               ^ (measuresFingerPrinter >> MEASURES_FINGER_PRINTER_SHIFT)
               ^ (complexFingerPrinter >> COMPLEX_FINGER_PRINTER_SHIFT);
     }
-    return this.fingerprinter;
+    return this.fingerPrinter;
   }
 
   private long getFingerprinter(List<ColumnSchema> columns) {
@@ -312,7 +312,7 @@
       columnSchema = columnsInTable.get(counter);
       if (columnSchema.isDimensionColumn()) {
         // not adding the cardinality of the non dictionary
-        // column as it was not the part of mdkey
+        // column as it was not part of the MDKey
         if (CarbonUtil.hasEncoding(columnSchema.getEncodingList(), Encoding.DICTIONARY)
             && !isComplexDimensionStarted && columnSchema.getNumberOfChild() == 0) {
           this.numberOfDictDimensions++;
@@ -320,7 +320,7 @@
           if (columnSchema.isSortColumn()) {
             this.numberOfSortColumns++;
           }
-          // if it is a columnar dimension participated in mdkey then added
+          // if it is a columnar dimension that participated in the MDKey then add
           // key ordinal and dimension ordinal
           carbonDimension =
               new CarbonDimension(columnSchema, dimensionOrdinal++, keyOrdinal++, -1);
@@ -400,11 +400,11 @@
     for (int i = 0; i < parentDimension.getNumberOfChild(); i++) {
       CarbonDimension dimension = parentDimension.getListOfChildDimensions().get(i);
       if (dimension.getNumberOfChild() > 0) {
-        dimension.setComplexTypeOridnal(++complexDimensionOrdinal);
+        dimension.setComplexTypeOrdinal(++complexDimensionOrdinal);
         complexDimensionOrdinal = assignComplexOrdinal(dimension, complexDimensionOrdinal);
       } else {
         parentDimension.getListOfChildDimensions().get(i)
-            .setComplexTypeOridnal(++complexDimensionOrdinal);
+            .setComplexTypeOrdinal(++complexDimensionOrdinal);
       }
     }
     return complexDimensionOrdinal;
@@ -455,8 +455,8 @@
   /**
    * @return It returns block index to dimension ordinal mapping
    */
-  public Map<Integer, Set<Integer>> getBlockTodimensionOrdinalMapping() {
-    return blockTodimensionOrdinalMapping;
+  public Map<Integer, Set<Integer>> getBlockToDimensionOrdinalMapping() {
+    return blockToDimensionOrdinalMapping;
   }
 
   /**
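
Condensing the comments above into a sketch: XOR a per-column value within each
category, shift each category's result by a distinct amount (matching the >> in the
formula and the three SHIFT constants), and XOR the results together, so the same
column appearing as a dimension in one segment and a measure in another changes the
fingerprint. Names and the per-column hash are illustrative:

import java.util.List;

// Sketch of the finger printer scheme described above.
static long fingerPrint(List<String> dims, List<String> msrs, List<String> complexes) {
  return (xorAll(dims) >> 1) ^ (xorAll(msrs) >> 2) ^ (xorAll(complexes) >> 3);
}

static long xorAll(List<String> columns) {
  long result = 0L;
  for (String column : columns) {
    result ^= column.hashCode();  // stand-in for the real per-column value
  }
  return result;
}
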
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentPropertiesAndSchemaHolder.java b/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentPropertiesAndSchemaHolder.java
index e0c8c6e..032eb0f 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentPropertiesAndSchemaHolder.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentPropertiesAndSchemaHolder.java
@@ -492,7 +492,7 @@
      */
     private Set<String> segmentIdSet;
     /**
-     * index which maps to segmentPropertiesWrpper Index from where segmentProperties
+     * index which maps to the segmentPropertiesWrapper index from where segmentProperties
      * can be retrieved
      */
     private int segmentPropertiesIndex;
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/block/TableBlockInfo.java b/core/src/main/java/org/apache/carbondata/core/datastore/block/TableBlockInfo.java
index 0edab5f..168bbf0 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/block/TableBlockInfo.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/block/TableBlockInfo.java
@@ -233,7 +233,7 @@
 
     int compareResult = 0;
     // get the segment id
-    // converr seg ID to double.
+    // convert segment ID to double.
 
     double seg1 = Double.parseDouble(segment.getSegmentNo());
     double seg2 = Double.parseDouble(((TableBlockInfo) other).segment.getSegmentNo());
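
Parsing the segment number as a double keeps the comparison numeric, so "10" sorts
after "9"; if compacted segments carry dotted IDs such as "0.1" (an assumption here),
they also land between their neighbours. A sketch:

// Sketch: numeric comparison of segment numbers.
static int compareSegmentNo(String segmentNoA, String segmentNoB) {
  return Double.compare(Double.parseDouble(segmentNoA), Double.parseDouble(segmentNoB));
}
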
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/DimensionColumnPage.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/DimensionColumnPage.java
index e7ba267..fcb900c 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/DimensionColumnPage.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/DimensionColumnPage.java
@@ -84,10 +84,10 @@
   /**
    * @return whether column is dictionary column or not
    */
-  boolean isNoDicitionaryColumn();
+  boolean isNoDictionaryColumn();
 
   /**
-   * @return whether columns where explictly sorted or not
+   * @return whether columns were explicitly sorted or not
    */
   boolean isExplicitSorted();
 
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/impl/AbstractDimensionColumnPage.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/impl/AbstractDimensionColumnPage.java
index a5bbc5f..baee982 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/impl/AbstractDimensionColumnPage.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/impl/AbstractDimensionColumnPage.java
@@ -113,7 +113,7 @@
    * @return column is dictionary column or not
    */
   @Override
-  public boolean isNoDicitionaryColumn() {
+  public boolean isNoDictionaryColumn() {
     return false;
   }
 }
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/impl/VariableLengthDimensionColumnPage.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/impl/VariableLengthDimensionColumnPage.java
index 2a71934..d823c47 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/impl/VariableLengthDimensionColumnPage.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/impl/VariableLengthDimensionColumnPage.java
@@ -79,8 +79,7 @@
    */
   @Override
   public int fillRawData(int rowId, int offset, byte[] data) {
-    // no required in this case because this column chunk is not the part if
-    // mdkey
+    // not required in this case because this column chunk is not part of the MDKey
     return 0;
   }
 
@@ -101,7 +100,7 @@
    * @return whether column is dictionary column or not
    */
   @Override
-  public boolean isNoDicitionaryColumn() {
+  public boolean isNoDictionaryColumn() {
     return true;
   }
 
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/CarbonDataReaderFactory.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/CarbonDataReaderFactory.java
index 2c599d4..a0e917f 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/CarbonDataReaderFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/CarbonDataReaderFactory.java
@@ -60,10 +60,10 @@
    * @return dimension column data reader based on version number
    */
   public DimensionColumnChunkReader getDimensionColumnChunkReader(ColumnarFormatVersion version,
-      BlockletInfo blockletInfo, String filePath, boolean readPagebyPage) {
+      BlockletInfo blockletInfo, String filePath, boolean readPageByPage) {
     switch (version) {
       case V3:
-        if (readPagebyPage) {
+        if (readPageByPage) {
           return new DimensionChunkPageReaderV3(blockletInfo, filePath);
         } else {
           return new DimensionChunkReaderV3(blockletInfo, filePath);
@@ -82,10 +82,10 @@
    * @return measure column data reader based on version number
    */
   public MeasureColumnChunkReader getMeasureColumnChunkReader(ColumnarFormatVersion version,
-      BlockletInfo blockletInfo, String filePath, boolean readPagebyPage) {
+      BlockletInfo blockletInfo, String filePath, boolean readPageByPage) {
     switch (version) {
       case V3:
-        if (readPagebyPage) {
+        if (readPageByPage) {
           return new MeasureChunkPageReaderV3(blockletInfo, filePath);
         } else {
           return new MeasureChunkReaderV3(blockletInfo, filePath);
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/AbstractDimensionChunkReader.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/AbstractDimensionChunkReader.java
index d14e69c..e6812c5 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/AbstractDimensionChunkReader.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/AbstractDimensionChunkReader.java
@@ -91,7 +91,7 @@
     // read the column chunk based on block index and add
     DimensionRawColumnChunk[] dataChunks =
         new DimensionRawColumnChunk[dimensionChunksOffset.size()];
-    // if blocklet index is empty then return empry data chunk
+    // if blocklet index is empty then return empty data chunk
     if (columnIndexRange.length == 0) {
       return dataChunks;
     }
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v3/DimensionChunkPageReaderV3.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v3/DimensionChunkPageReaderV3.java
index 1ce8465..deafe48 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v3/DimensionChunkPageReaderV3.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v3/DimensionChunkPageReaderV3.java
@@ -155,7 +155,7 @@
           CarbonMetadataUtil.getCompressorNameFromChunkMeta(pageMetadata.getChunk_meta()));
     }
     // calculating the start point of data
-    // as buffer can contain multiple column data, start point will be datachunkoffset +
+    // as buffer can contain multiple column data, start point will be data chunk offset +
     // data chunk length + page offset
     long offset = dimensionRawColumnChunk.getOffSet() + dimensionChunksLength
         .get(dimensionRawColumnChunk.getColumnIndex()) + dataChunk3.getPage_offset()
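
The start-point arithmetic in this comment recurs across the V3 readers below: page
data begins at the column chunk's offset, plus the chunk metadata length, plus the
requested page's offset within the chunk. As a sketch, with hypothetical names:

// Sketch: absolute start of one page inside a buffer holding several column chunks.
static long pageStart(long columnChunkOffset, long chunkMetaLength, long pageOffset) {
  return columnChunkOffset + chunkMetaLength + pageOffset;
}
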
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v3/DimensionChunkReaderV3.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v3/DimensionChunkReaderV3.java
index 1cfcbd1..d53c9d3 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v3/DimensionChunkReaderV3.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v3/DimensionChunkReaderV3.java
@@ -98,7 +98,7 @@
     // column other than last column we can subtract the offset of current column with
     // next column and get the total length.
     // but for last column we need to use lastDimensionOffset which is the end position
-    // of the last dimension, we can subtract current dimension offset from lastDimesionOffset
+    // of the last dimension, we can subtract current dimension offset from lastDimensionOffset
     if (dimensionChunksOffset.size() - 1 == columnIndex) {
       length = (int) (lastDimensionOffsets - currentDimensionOffset);
     } else {
@@ -225,7 +225,7 @@
         pageMetadata.getChunk_meta());
     this.compressor = CompressorFactory.getInstance().getCompressor(compressorName);
     // calculating the start point of data
-    // as buffer can contain multiple column data, start point will be datachunkoffset +
+    // as buffer can contain multiple column data, start point will be data chunk offset +
     // data chunk length + page offset
     int offset = (int) rawColumnPage.getOffSet() + dimensionChunksLength
         .get(rawColumnPage.getColumnIndex()) + dataChunk3.getPage_offset().get(pageNumber);
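
The length computation described in this hunk derives each column's size from
consecutive offsets, with the last column bounded by the recorded end position
instead. A sketch under that reading:

// Sketch: length of column i, given per-column start offsets and the end
// position of the final dimension.
static long chunkLength(long[] columnOffsets, int i, long lastDimensionOffset) {
  if (i == columnOffsets.length - 1) {
    return lastDimensionOffset - columnOffsets[i];  // last column: use recorded end
  }
  return columnOffsets[i + 1] - columnOffsets[i];   // others: next start - this start
}
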
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v3/MeasureChunkPageReaderV3.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v3/MeasureChunkPageReaderV3.java
index 7776562..6ce717c 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v3/MeasureChunkPageReaderV3.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v3/MeasureChunkPageReaderV3.java
@@ -147,7 +147,7 @@
         pageMetadata.getChunk_meta());
     this.compressor = CompressorFactory.getInstance().getCompressor(compressorName);
     // calculating the start point of data
-    // as buffer can contain multiple column data, start point will be datachunkoffset +
+    // as buffer can contain multiple column data, start point will be data chunk offset +
     // data chunk length + page offset
     long offset = rawColumnPage.getOffSet() + measureColumnChunkLength
         .get(rawColumnPage.getColumnIndex()) + dataChunk3.getPage_offset().get(pageNumber);
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v3/MeasureChunkReaderV3.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v3/MeasureChunkReaderV3.java
index 3a8e5f0..7ad92e5 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v3/MeasureChunkReaderV3.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v3/MeasureChunkReaderV3.java
@@ -215,7 +215,7 @@
         pageMetadata.getChunk_meta());
     this.compressor = CompressorFactory.getInstance().getCompressor(compressorName);
     // calculating the start point of data
-    // as buffer can contain multiple column data, start point will be datachunkoffset +
+    // as buffer can contain multiple column data, start point will be data chunk offset +
     // data chunk length + page offset
     int offset = (int) rawColumnChunk.getOffSet() +
         measureColumnChunkLength.get(rawColumnChunk.getColumnIndex()) +
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/ColumnPageWrapper.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/ColumnPageWrapper.java
index 5f1cac9..e63757d 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/ColumnPageWrapper.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/ColumnPageWrapper.java
@@ -237,7 +237,7 @@
   }
 
   @Override
-  public boolean isNoDicitionaryColumn() {
+  public boolean isNoDictionaryColumn() {
     return true;
   }
 
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/DimensionDataChunkStore.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/DimensionDataChunkStore.java
index 8972ddb..6a0f998 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/DimensionDataChunkStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/DimensionDataChunkStore.java
@@ -22,12 +22,12 @@
 
 /**
  * Interface responsibility is to store dimension data in memory.
- * storage can be on heap or offheap.
+ * Storage can be on heap or off-heap.
  */
 public interface DimensionDataChunkStore {
 
   /**
-   * Below method will be used to put the rows and its metadata in offheap
+   * Below method will be used to put the rows and its metadata in off-heap
    *
    * @param invertedIndex        inverted index to be stored
    * @param invertedIndexReverse inverted index reverse to be stored
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/LocalDictDimensionDataChunkStore.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/LocalDictDimensionDataChunkStore.java
index c57cc8d..de2b720 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/LocalDictDimensionDataChunkStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/LocalDictDimensionDataChunkStore.java
@@ -25,7 +25,7 @@
 import org.apache.carbondata.core.scan.result.vector.CarbonDictionary;
 import org.apache.carbondata.core.scan.result.vector.ColumnVectorInfo;
 import org.apache.carbondata.core.scan.result.vector.impl.directread.ColumnarVectorWrapperDirectFactory;
-import org.apache.carbondata.core.scan.result.vector.impl.directread.ConvertableVector;
+import org.apache.carbondata.core.scan.result.vector.impl.directread.ConvertibleVector;
 import org.apache.carbondata.core.util.CarbonUtil;
 
 /**
@@ -48,7 +48,7 @@
   }
 
   /**
-   * Below method will be used to put the rows and its metadata in offheap
+   * Below method will be used to put the rows and its metadata in off-heap
    *
    * @param invertedIndex        inverted index to be stored
    * @param invertedIndexReverse inverted index reverse to be stored
@@ -87,8 +87,8 @@
         dictionaryVector.putInt(i, surrogate);
       }
     }
-    if (dictionaryVector instanceof ConvertableVector) {
-      ((ConvertableVector) dictionaryVector).convert();
+    if (dictionaryVector instanceof ConvertibleVector) {
+      ((ConvertibleVector) dictionaryVector).convert();
     }
   }
 
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeAbsractDimensionDataChunkStore.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeAbstractDimensionDataChunkStore.java
similarity index 92%
rename from core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeAbsractDimensionDataChunkStore.java
rename to core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeAbstractDimensionDataChunkStore.java
index 0a53ec6..6725393 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeAbsractDimensionDataChunkStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeAbstractDimensionDataChunkStore.java
@@ -23,7 +23,7 @@
 /**
  * Responsibility is to store dimension data
  */
-public abstract class SafeAbsractDimensionDataChunkStore implements DimensionDataChunkStore {
+public abstract class SafeAbstractDimensionDataChunkStore implements DimensionDataChunkStore {
 
   /**
    * data chunk for dimension column
@@ -43,19 +43,19 @@
   /**
    * to check whether dimension column was explicitly sorted or not
    */
-  protected boolean isExplictSorted;
+  protected boolean isExplicitSorted;
 
   /**
    * Constructor
    *
    * @param isInvertedIdex is inverted index present
    */
-  public SafeAbsractDimensionDataChunkStore(boolean isInvertedIdex) {
-    this.isExplictSorted = isInvertedIdex;
+  public SafeAbstractDimensionDataChunkStore(boolean isInvertedIdex) {
+    this.isExplicitSorted = isInvertedIdex;
   }
 
   /**
-   * Below method will be used to put the rows and its metadata in offheap
+   * Below method will be used to put the rows and its metadata in off-heap
    *
    * @param invertedIndex        inverted index to be stored
    * @param invertedIndexReverse inverted index reverse to be stored
@@ -126,7 +126,7 @@
    */
   @Override
   public boolean isExplicitSorted() {
-    return isExplictSorted;
+    return isExplicitSorted;
   }
 
   /**
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeFixedLengthDimensionDataChunkStore.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeFixedLengthDimensionDataChunkStore.java
index 80640ab..4327b7e 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeFixedLengthDimensionDataChunkStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeFixedLengthDimensionDataChunkStore.java
@@ -26,14 +26,14 @@
 import org.apache.carbondata.core.scan.result.vector.CarbonColumnVector;
 import org.apache.carbondata.core.scan.result.vector.ColumnVectorInfo;
 import org.apache.carbondata.core.scan.result.vector.impl.directread.ColumnarVectorWrapperDirectFactory;
-import org.apache.carbondata.core.scan.result.vector.impl.directread.ConvertableVector;
+import org.apache.carbondata.core.scan.result.vector.impl.directread.ConvertibleVector;
 import org.apache.carbondata.core.util.ByteUtil;
 import org.apache.carbondata.core.util.CarbonUtil;
 
 /**
  * Below class will be used to store fixed length dimension data
  */
-public class SafeFixedLengthDimensionDataChunkStore extends SafeAbsractDimensionDataChunkStore {
+public class SafeFixedLengthDimensionDataChunkStore extends SafeAbstractDimensionDataChunkStore {
 
   /**
    * Size of each value
@@ -58,8 +58,8 @@
     vector = ColumnarVectorWrapperDirectFactory
         .getDirectVectorWrapperFactory(vector, invertedIndex, nullBits, deletedRows, false, false);
     fillVector(data, vectorInfo, vector);
-    if (vector instanceof ConvertableVector) {
-      ((ConvertableVector) vector).convert();
+    if (vector instanceof ConvertibleVector) {
+      ((ConvertibleVector) vector).convert();
     }
   }
 
@@ -102,8 +102,8 @@
    */
   @Override
   public byte[] getRow(int rowId) {
-    // if column was explicitly sorted we need to get the rowid based inverted index reverse
-    if (isExplictSorted) {
+    // if column was explicitly sorted, map the row id through the inverted index reverse
+    if (isExplicitSorted) {
       rowId = invertedIndexReverse[rowId];
     }
     // creating a row
@@ -123,8 +123,8 @@
    */
   @Override
   public int getSurrogate(int index) {
-    // if column was explicitly sorted we need to get the rowid based inverted index reverse
-    if (isExplictSorted) {
+    // if column was explicitly sorted, map the row id through the inverted index reverse
+    if (isExplicitSorted) {
       index = invertedIndexReverse[index];
     }
     // below part is to convert the byte array to surrogate value
@@ -141,8 +141,8 @@
    */
   @Override
   public void fillRow(int rowId, byte[] buffer, int offset) {
-    // if column was explicitly sorted we need to get the rowid based inverted index reverse
-    if (isExplictSorted) {
+    // if column was explicitly sorted, map the row id through the inverted index reverse
+    if (isExplicitSorted) {
       rowId = invertedIndexReverse[rowId];
     }
     //copy the row from memory block based on offset
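
All of these accessors share one indirection: when the column was explicitly sorted,
the data sits in sorted order, so a logical row id must be mapped through the reverse
inverted index to find its physical position. A sketch of that lookup:

// Sketch: map a logical row id to its physical position in sorted storage.
static int physicalRowId(int rowId, boolean isExplicitSorted, int[] invertedIndexReverse) {
  return isExplicitSorted ? invertedIndexReverse[rowId] : rowId;
}
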
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeVariableIntLengthDimensionDataChunkStore.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeVariableIntLengthDimensionDataChunkStore.java
index 8dc4c0b..c0ea251 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeVariableIntLengthDimensionDataChunkStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeVariableIntLengthDimensionDataChunkStore.java
@@ -23,7 +23,7 @@
 
 /**
  * Below class is responsible to store variable long length(>32000) dimension data chunk in
- * memory. Memory occupied can be on heap or offheap using unsafe interface
+ * memory. Memory occupied can be on heap or off-heap using unsafe interface
  */
 public class SafeVariableIntLengthDimensionDataChunkStore
     extends SafeVariableLengthDimensionDataChunkStore {
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeVariableLengthDimensionDataChunkStore.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeVariableLengthDimensionDataChunkStore.java
index 5aab8d9..972fa97 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeVariableLengthDimensionDataChunkStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeVariableLengthDimensionDataChunkStore.java
@@ -26,16 +26,16 @@
 import org.apache.carbondata.core.scan.result.vector.CarbonColumnVector;
 import org.apache.carbondata.core.scan.result.vector.ColumnVectorInfo;
 import org.apache.carbondata.core.scan.result.vector.impl.directread.ColumnarVectorWrapperDirectFactory;
-import org.apache.carbondata.core.scan.result.vector.impl.directread.ConvertableVector;
+import org.apache.carbondata.core.scan.result.vector.impl.directread.ConvertibleVector;
 import org.apache.carbondata.core.util.ByteUtil;
 import org.apache.carbondata.core.util.DataTypeUtil;
 
 /**
  * Below class is responsible to store variable length dimension data chunk in
- * memory. Memory occupied can be on heap or offheap using unsafe interface
+ * memory. Memory occupied can be on heap or off-heap using unsafe interface
  */
 public abstract class SafeVariableLengthDimensionDataChunkStore
-    extends SafeAbsractDimensionDataChunkStore {
+    extends SafeAbstractDimensionDataChunkStore {
 
   /**
    * total number of rows
@@ -59,7 +59,7 @@
   }
 
   /**
-   * Below method will be used to put the rows and its metadata in offheap
+   * Below method will be used to put the rows and its metadata in off-heap
    *
    * @param invertedIndex        inverted index to be stored
    * @param invertedIndexReverse inverted index reverse to be stored
@@ -112,8 +112,8 @@
         .getDirectVectorWrapperFactory(vector, invertedIndex, new BitSet(), vectorInfo.deletedRows,
             false, false);
     vectorFiller.fillVector(data, vector);
-    if (vector instanceof ConvertableVector) {
-      ((ConvertableVector) vector).convert();
+    if (vector instanceof ConvertibleVector) {
+      ((ConvertibleVector) vector).convert();
     }
   }
 
@@ -123,8 +123,8 @@
 
   @Override
   public byte[] getRow(int rowId) {
-    // if column was explicitly sorted we need to get the rowid based inverted index reverse
-    if (isExplictSorted) {
+    // if column was explicitly sorted, map the row id through the inverted index reverse
+    if (isExplicitSorted) {
       rowId = invertedIndexReverse[rowId];
     }
     // now to get the row from memory block we need to do following thing
@@ -150,8 +150,8 @@
   @Override
   public void fillRow(int rowId, CarbonColumnVector vector, int vectorRow) {
     vector.setDictionary(null);
-    // if column was explicitly sorted we need to get the rowid based inverted index reverse
-    if (isExplictSorted) {
+    // if column was explicitly sorted, map the row id through the inverted index reverse
+    if (isExplicitSorted) {
       rowId = invertedIndexReverse[rowId];
     }
     // now to get the row from memory block we need to do following thing
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeVariableShortLengthDimensionDataChunkStore.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeVariableShortLengthDimensionDataChunkStore.java
index daac725..8a66e07 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeVariableShortLengthDimensionDataChunkStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeVariableShortLengthDimensionDataChunkStore.java
@@ -23,7 +23,7 @@
 
 /**
  * Below class is responsible to store variable long length(>32000) dimension data chunk in
- * memory. Memory occupied can be on heap or offheap using unsafe interface
+ * memory. Memory occupied can be on heap or off-heap using unsafe interface
  */
 public class SafeVariableShortLengthDimensionDataChunkStore
     extends SafeVariableLengthDimensionDataChunkStore {
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeAbstractDimensionDataChunkStore.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeAbstractDimensionDataChunkStore.java
index 23376d3..0ae3181 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeAbstractDimensionDataChunkStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeAbstractDimensionDataChunkStore.java
@@ -28,7 +28,7 @@
 
 /**
  * Responsibility is to store dimension data in memory. storage can be on heap
- * or offheap.
+ * or off-heap.
  */
 public abstract class UnsafeAbstractDimensionDataChunkStore implements DimensionDataChunkStore {
 
@@ -68,20 +68,20 @@
    * Constructor
    *
    * @param totalSize      total size of the data to be kept
-   * @param isInvertedIdex is inverted index present
+   * @param isInvertedIndex is inverted index present
    * @param numberOfRows   total number of rows
    */
-  public UnsafeAbstractDimensionDataChunkStore(long totalSize, boolean isInvertedIdex,
+  public UnsafeAbstractDimensionDataChunkStore(long totalSize, boolean isInvertedIndex,
       int numberOfRows, int dataLength) {
     // allocating the data page
     this.dataPageMemoryBlock = UnsafeMemoryManager.allocateMemoryWithRetry(taskId, totalSize);
 
     this.dataLength = dataLength;
-    this.isExplicitSorted = isInvertedIdex;
+    this.isExplicitSorted = isInvertedIndex;
   }
 
   /**
-   * Below method will be used to put the rows and its metadata in offheap
+   * Below method will be used to put the rows and its metadata in off-heap
    *
    * @param invertedIndex        inverted index to be stored
    * @param invertedIndexReverse inverted index reverse to be stored
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeFixedLengthDimensionDataChunkStore.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeFixedLengthDimensionDataChunkStore.java
index f2464eb..9029772 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeFixedLengthDimensionDataChunkStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeFixedLengthDimensionDataChunkStore.java
@@ -22,7 +22,7 @@
 
 /**
  * Below class is responsible to store fixed length dimension data chunk in
- * memory Memory occupied can be on heap or offheap using unsafe interface
+ * memory. Memory occupied can be on heap or off-heap using unsafe interface
  */
 public class UnsafeFixedLengthDimensionDataChunkStore
     extends UnsafeAbstractDimensionDataChunkStore {
@@ -36,12 +36,12 @@
    * Constructor
    *
    * @param columnValueSize value of each column
-   * @param isInvertedIdex  is inverted index present
+   * @param isInvertedIndex  is inverted index present
    * @param numberOfRows    total number of rows
    */
   public UnsafeFixedLengthDimensionDataChunkStore(long totalDataSize, int columnValueSize,
-      boolean isInvertedIdex, int numberOfRows, int dataLength) {
-    super(totalDataSize, isInvertedIdex, numberOfRows, dataLength);
+      boolean isInvertedIndex, int numberOfRows, int dataLength) {
+    super(totalDataSize, isInvertedIndex, numberOfRows, dataLength);
     this.columnValueSize = columnValueSize;
   }
 
@@ -52,7 +52,7 @@
    */
   @Override
   public byte[] getRow(int rowId) {
-    // if column was explicitly sorted we need to get the rowid based inverted index reverse
+    // if column was explicitly sorted, map the row id through the inverted index reverse
     if (isExplicitSorted) {
       rowId = CarbonUnsafe.getUnsafe().getInt(dataPageMemoryBlock.getBaseObject(),
           dataPageMemoryBlock.getBaseOffset() + this.invertedIndexReverseOffset + ((long)rowId
@@ -77,7 +77,7 @@
    */
   @Override
   public int getSurrogate(int index) {
-    // if column was explicitly sorted we need to get the rowid based inverted index reverse
+    // if column was explicitly sorted, map the row id through the inverted index reverse
     if (isExplicitSorted) {
       index = CarbonUnsafe.getUnsafe().getInt(dataPageMemoryBlock.getBaseObject(),
           dataPageMemoryBlock.getBaseOffset() + this.invertedIndexReverseOffset + ((long)index
@@ -104,7 +104,7 @@
    */
   @Override
   public void fillRow(int rowId, byte[] buffer, int offset) {
-    // if column was explicitly sorted we need to get the rowid based inverted index reverse
+    // if column was explicitly sorted, map the row id through the inverted index reverse
     if (isExplicitSorted) {
       rowId = CarbonUnsafe.getUnsafe().getInt(dataPageMemoryBlock.getBaseObject(),
           dataPageMemoryBlock.getBaseOffset() + this.invertedIndexReverseOffset + ((long)rowId
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableIntLengthDimensionDataChunkStore.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableIntLengthDimensionDataChunkStore.java
index 80a7482..6fed7c6 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableIntLengthDimensionDataChunkStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableIntLengthDimensionDataChunkStore.java
@@ -23,13 +23,13 @@
 
 /**
  * Below class is responsible to store variable length dimension data chunk in
- * memory Memory occupied can be on heap or offheap using unsafe interface
+ * memory. Memory occupied can be on heap or off-heap using unsafe interface
  */
 public class UnsafeVariableIntLengthDimensionDataChunkStore
     extends UnsafeVariableLengthDimensionDataChunkStore {
-  public UnsafeVariableIntLengthDimensionDataChunkStore(long totalSize, boolean isInvertedIdex,
+  public UnsafeVariableIntLengthDimensionDataChunkStore(long totalSize, boolean isInvertedIndex,
       int numberOfRows, int dataLength) {
-    super(totalSize, isInvertedIdex, numberOfRows, dataLength);
+    super(totalSize, isInvertedIndex, numberOfRows, dataLength);
   }
 
   @Override
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableLengthDimensionDataChunkStore.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableLengthDimensionDataChunkStore.java
index bd1e7c7..f44ea10 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableLengthDimensionDataChunkStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableLengthDimensionDataChunkStore.java
@@ -26,7 +26,7 @@
 
 /**
  * Below class is responsible to store variable length dimension data chunk in
- * memory Memory occupied can be on heap or offheap using unsafe interface
+ * memory. Memory occupied can be on heap or off-heap using unsafe interface
  */
 public abstract class UnsafeVariableLengthDimensionDataChunkStore
     extends UnsafeAbstractDimensionDataChunkStore {
@@ -49,16 +49,16 @@
    */
   private byte[] value;
 
-  public UnsafeVariableLengthDimensionDataChunkStore(long totalSize, boolean isInvertedIdex,
+  public UnsafeVariableLengthDimensionDataChunkStore(long totalSize, boolean isInvertedIndex,
       int numberOfRows, int dataLength) {
-    super(totalSize, isInvertedIdex, numberOfRows, dataLength);
+    super(totalSize, isInvertedIndex, numberOfRows, dataLength);
     this.numberOfRows = numberOfRows;
     // initials size assigning to some random value
     this.value = new byte[20];
   }
 
   /**
-   * Below method will be used to put the rows and its metadata in offheap
+   * Below method will be used to put the rows and its metadata in off-heap
    *
    * @param invertedIndex        inverted index to be stored
    * @param invertedIndexReverse inverted index reverse to be stored
@@ -147,7 +147,7 @@
    * @return actual row id
    */
   private int getRowId(int rowId) {
-    // if column was explicitly sorted we need to get the rowid based inverted index reverse
+    // if column was explicitly sorted, map the row id through the inverted index reverse
     if (isExplicitSorted) {
       rowId = CarbonUnsafe.getUnsafe().getInt(dataPageMemoryBlock.getBaseObject(),
           dataPageMemoryBlock.getBaseOffset() + this.invertedIndexReverseOffset + ((long)rowId
@@ -181,10 +181,10 @@
     int length = 0;
     // calculating the length of data
     if (rowId < numberOfRows - 1) {
-      int OffsetOfNextdata = CarbonUnsafe.getUnsafe().getInt(dataPageMemoryBlock.getBaseObject(),
+      int OffsetOfNextData = CarbonUnsafe.getUnsafe().getInt(dataPageMemoryBlock.getBaseObject(),
           dataPageMemoryBlock.getBaseOffset() + this.dataPointersOffsets + ((rowId + 1)
               * CarbonCommonConstants.INT_SIZE_IN_BYTE));
-      length = OffsetOfNextdata - (currentDataOffset + getLengthSize());
+      length = OffsetOfNextData - (currentDataOffset + getLengthSize());
     } else {
       // for last record we need to subtract with data length
       length = this.dataLength - currentDataOffset;
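
Mirroring the arithmetic in this hunk: variable-length stores keep only start offsets,
so a value's length is the next row's offset minus this row's data start (past the
stored length prefix), and the final row is bounded by the total data length. A
plain-array sketch of the same idea:

// Sketch: recover value length from an offset table; lengthSize is the size
// of the stored length prefix (e.g. 2 bytes for short, 4 bytes for int).
static int valueLength(int[] offsets, int rowId, int totalDataLength, int lengthSize) {
  int currentDataOffset = offsets[rowId] + lengthSize;  // skip the length prefix
  if (rowId < offsets.length - 1) {
    return offsets[rowId + 1] - currentDataOffset;
  }
  return totalDataLength - currentDataOffset;
}
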
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableShortLengthDimensionDataChunkStore.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableShortLengthDimensionDataChunkStore.java
index 502fc48..5f67b61 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableShortLengthDimensionDataChunkStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableShortLengthDimensionDataChunkStore.java
@@ -23,13 +23,13 @@
 
 /**
  * Below class is responsible to store variable length dimension data chunk in
- * memory Memory occupied can be on heap or offheap using unsafe interface
+ * memory. Memory occupied can be on heap or off-heap using unsafe interface
  */
 public class UnsafeVariableShortLengthDimensionDataChunkStore
     extends UnsafeVariableLengthDimensionDataChunkStore {
-  public UnsafeVariableShortLengthDimensionDataChunkStore(long totalSize, boolean isInvertedIdex,
+  public UnsafeVariableShortLengthDimensionDataChunkStore(long totalSize, boolean isInvertedIndex,
       int numberOfRows, int dataLength) {
-    super(totalSize, isInvertedIdex, numberOfRows, dataLength);
+    super(totalSize, isInvertedIndex, numberOfRows, dataLength);
   }
 
   @Override
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/columnar/ByteArrayBlockIndexerStorage.java b/core/src/main/java/org/apache/carbondata/core/datastore/columnar/ByteArrayBlockIndexerStorage.java
index f5117cc..b65ac52 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/columnar/ByteArrayBlockIndexerStorage.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/columnar/ByteArrayBlockIndexerStorage.java
@@ -46,17 +46,17 @@
    */
   private ByteArrayColumnWithRowId[] createColumnWithRowId(byte[][] dataPage,
       boolean isNoDictionary) {
-    ByteArrayColumnWithRowId[] columnWithIndexs = new ByteArrayColumnWithRowId[dataPage.length];
+    ByteArrayColumnWithRowId[] columnWithIndexes = new ByteArrayColumnWithRowId[dataPage.length];
     if (isNoDictionary) {
-      for (short i = 0; i < columnWithIndexs.length; i++) {
-        columnWithIndexs[i] = new ByteArrayColumnWithRowId(dataPage[i], i);
+      for (short i = 0; i < columnWithIndexes.length; i++) {
+        columnWithIndexes[i] = new ByteArrayColumnWithRowId(dataPage[i], i);
       }
     } else {
-      for (short i = 0; i < columnWithIndexs.length; i++) {
-        columnWithIndexs[i] = new ByteArrayColumnWithRowId(dataPage[i], i);
+      for (short i = 0; i < columnWithIndexes.length; i++) {
+        columnWithIndexes[i] = new ByteArrayColumnWithRowId(dataPage[i], i);
       }
     }
-    return columnWithIndexs;
+    return columnWithIndexes;
   }
 
   private short[] extractDataAndReturnRowId(ByteArrayColumnWithRowId[] dataWithRowId,
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/columnar/ObjectArrayBlockIndexerStorage.java b/core/src/main/java/org/apache/carbondata/core/datastore/columnar/ObjectArrayBlockIndexerStorage.java
index 27c26a9..23f1c67 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/columnar/ObjectArrayBlockIndexerStorage.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/columnar/ObjectArrayBlockIndexerStorage.java
@@ -45,12 +45,12 @@
    * @return
    */
   private ObjectColumnWithRowId[] createColumnWithRowId(Object[] dataPage) {
-    ObjectColumnWithRowId[] columnWithIndexs =
+    ObjectColumnWithRowId[] columnWithIndexes =
         new ObjectColumnWithRowId[dataPage.length];
-    for (short i = 0; i < columnWithIndexs.length; i++) {
-      columnWithIndexs[i] = new ObjectColumnWithRowId(dataPage[i], i, dataType);
+    for (short i = 0; i < columnWithIndexes.length; i++) {
+      columnWithIndexes[i] = new ObjectColumnWithRowId(dataPage[i], i, dataType);
     }
-    return columnWithIndexs;
+    return columnWithIndexes;
   }
 
   private short[] extractDataAndReturnRowId(ObjectColumnWithRowId[] dataWithRowId,
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/compression/CompressorFactory.java b/core/src/main/java/org/apache/carbondata/core/datastore/compression/CompressorFactory.java
index e695bda..5b37cae 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/compression/CompressorFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/compression/CompressorFactory.java
@@ -103,8 +103,8 @@
               + " found '%s'", compressorClassName, ((Compressor) instance).getName()));
         }
         allSupportedCompressors.put(compressorClassName, (Compressor) instance);
-        LOGGER.info(
-            String.format("sucessfully register compressor %s to carbondata", compressorClassName));
+        LOGGER.info(String.format(
+            "successfully registered compressor %s to carbondata", compressorClassName));
         return (Compressor) instance;
       } else {
         throw new RuntimeException(
@@ -142,7 +142,7 @@
   }
 
   // if we specify the compressor name in table property, carbondata now will convert the
-  // property value to lowercase, so here we will ingore the case and find the real name.
+  // property value to lowercase, so here we will ignore the case and find the real name.
   private String getInternalCompressorName(String name) {
     for (String key : allSupportedCompressors.keySet()) {
       if (key.equalsIgnoreCase(name)) {
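The ignore-case lookup above maps the lowercased table-property value back to the key under which the compressor was registered. A self-contained sketch of that resolution (hypothetical registry class, not the CarbonData factory):

import java.util.LinkedHashSet;
import java.util.Set;

public final class CompressorNameResolver {
  private final Set<String> registered = new LinkedHashSet<>();

  void register(String name) {
    registered.add(name);
  }

  // Mirrors the ignore-case resolution above: return the registered key
  // matching the (possibly lowercased) requested name, or null if unknown.
  String internalName(String name) {
    for (String key : registered) {
      if (key.equalsIgnoreCase(name)) {
        return key;
      }
    }
    return null;
  }

  public static void main(String[] args) {
    CompressorNameResolver r = new CompressorNameResolver();
    r.register("org.example.ZstdCompressor");
    System.out.println(r.internalName("org.example.zstdcompressor"));
  }
}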
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/compression/GzipCompressor.java b/core/src/main/java/org/apache/carbondata/core/datastore/compression/GzipCompressor.java
index 390029a..102786b 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/compression/GzipCompressor.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/compression/GzipCompressor.java
@@ -143,7 +143,7 @@
   @Override
   public long rawUncompress(byte[] input, byte[] output) {
     //gzip api doesnt have rawUncompress yet.
-    throw new RuntimeException("Not implemented rawUcompress for gzip yet");
+    throw new RuntimeException("Not implemented rawUncompress for gzip yet");
   }
 
   @Override
@@ -165,6 +165,6 @@
   @Override
   public int rawUncompress(byte[] data, int offset, int length, byte[] output) {
     //gzip api doesnt have rawUncompress yet.
-    throw new RuntimeException("Not implemented rawUcompress for gzip yet");
+    throw new RuntimeException("Not implemented rawUncompress for gzip yet");
   }
 }
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/AbstractDFSCarbonFile.java b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/AbstractDFSCarbonFile.java
index 7e12dc9..f5cb539 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/AbstractDFSCarbonFile.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/AbstractDFSCarbonFile.java
@@ -160,9 +160,9 @@
   }
 
   @Override
-  public boolean renameTo(String changetoName) {
+  public boolean renameTo(String changeToName) {
     try {
-      return fileSystem.rename(path, new Path(changetoName));
+      return fileSystem.rename(path, new Path(changeToName));
     } catch (IOException e) {
       throw new CarbonFileException("Failed to rename file: ", e);
     }
@@ -293,7 +293,7 @@
   }
 
   /**
-   * return the datainputStream which is seek to the offset of file
+   * return the DataInputStream which is positioned at the offset of the file
    *
    * @return DataInputStream
    * @throws IOException
@@ -346,7 +346,7 @@
     } else if ("LZ4".equalsIgnoreCase(compressorName)) {
       return Lz4Codec.class.getName();
     } else {
-      throw new IOException("Unsuppotted compressor: " + compressorName);
+      throw new IOException("Unsupported compressor: " + compressorName);
     }
   }
 
@@ -408,7 +408,7 @@
         permission =
             FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(fileSystem.getConf()));
       }
-      // Pass the permissions duringg file creation itself
+      // Pass the permissions during file creation itself
       fileSystem
           .create(path, permission, false, fileSystem.getConf().getInt("io.file.buffer.size", 4096),
               fileSystem.getDefaultReplication(path), fileSystem.getDefaultBlockSize(path), null)
@@ -437,12 +437,12 @@
     if (fileSystem.exists(path)) {
       return false;
     } else {
-      // Pass the permissions duringg file creation itself
+      // Pass the permissions during file creation itself
       fileSystem.create(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL), false,
           fileSystem.getConf().getInt("io.file.buffer.size", 4096),
           fileSystem.getDefaultReplication(path), fileSystem.getDefaultBlockSize(path), null)
           .close();
-      // haddop masks the permission accoding to configured permission, so need to set permission
+      // hadoop masks the permission according to the configured umask, so need to set permission
       // forcefully
       fileSystem.setPermission(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
       return true;
@@ -455,14 +455,14 @@
     try {
       listStatus = fileSystem.listStatus(path);
     } catch (IOException e) {
-      LOGGER.warn("Exception occured: " + e.getMessage(), e);
+      LOGGER.warn("Exception occurred: " + e.getMessage(), e);
       return new CarbonFile[0];
     }
     return getFiles(listStatus);
   }
 
   /**
-   * Get the CarbonFiles from filestatus array
+   * Get the CarbonFiles from FileStatus array
    */
   protected abstract CarbonFile[] getFiles(FileStatus[] listStatus);
 
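Both "permissions during file creation" hunks above follow one pattern: create the file with explicit permissions, then set them again because Hadoop masks the requested bits with the configured umask. A sketch of that pattern, assuming a default Configuration and a placeholder path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public final class ForcedPermissionCreate {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example-file");   // placeholder path
    FsPermission all = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
    if (!fs.exists(path)) {
      // create with explicit permissions; the umask may still mask some bits
      fs.create(path, all, false,
          conf.getInt("io.file.buffer.size", 4096),
          fs.getDefaultReplication(path), fs.getDefaultBlockSize(path), null)
          .close();
      // so force the intended permissions after creation
      fs.setPermission(path, all);
    }
  }
}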
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/AlluxioCarbonFile.java b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/AlluxioCarbonFile.java
index 1a748af..ae2cd7c 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/AlluxioCarbonFile.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/AlluxioCarbonFile.java
@@ -71,10 +71,10 @@
   }
 
   @Override
-  public boolean renameForce(String changetoName) {
+  public boolean renameForce(String changeToName) {
     try {
       if (fileSystem instanceof DistributedFileSystem) {
-        ((DistributedFileSystem) fileSystem).rename(path, new Path(changetoName),
+        ((DistributedFileSystem) fileSystem).rename(path, new Path(changeToName),
             org.apache.hadoop.fs.Options.Rename.OVERWRITE);
         return true;
       }
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/HDFSCarbonFile.java b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/HDFSCarbonFile.java
index 7d36f0e..53d2e99 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/HDFSCarbonFile.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/HDFSCarbonFile.java
@@ -72,22 +72,22 @@
   }
 
   @Override
-  public boolean renameForce(String changetoName) {
+  public boolean renameForce(String changeToName) {
     try {
       if (fileSystem instanceof DistributedFileSystem) {
-        ((DistributedFileSystem) fileSystem).rename(path, new Path(changetoName),
+        ((DistributedFileSystem) fileSystem).rename(path, new Path(changeToName),
             org.apache.hadoop.fs.Options.Rename.OVERWRITE);
         return true;
       } else if ((fileSystem instanceof FilterFileSystem) && (((FilterFileSystem) fileSystem)
           .getRawFileSystem() instanceof DistributedFileSystem)) {
         ((DistributedFileSystem) ((FilterFileSystem) fileSystem).getRawFileSystem())
-            .rename(path, new Path(changetoName), org.apache.hadoop.fs.Options.Rename.OVERWRITE);
+            .rename(path, new Path(changeToName), org.apache.hadoop.fs.Options.Rename.OVERWRITE);
         return true;
       } else {
-        return fileSystem.rename(path, new Path(changetoName));
+        return fileSystem.rename(path, new Path(changeToName));
       }
     } catch (IOException e) {
-      LOGGER.error("Exception occured: " + e.getMessage(), e);
+      LOGGER.error("Exception occurred: " + e.getMessage(), e);
       return false;
     }
   }
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/LocalCarbonFile.java b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/LocalCarbonFile.java
index 1bb1c99..1cc7242 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/LocalCarbonFile.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/LocalCarbonFile.java
@@ -117,7 +117,7 @@
     try {
       return file.getCanonicalPath();
     } catch (IOException e) {
-      LOGGER.error("Exception occured" + e.getMessage(), e);
+      LOGGER.error("Exception occurred" + e.getMessage(), e);
     }
     return null;
   }
@@ -137,9 +137,9 @@
     return file.length();
   }
 
-  public boolean renameTo(String changetoName) {
-    changetoName = FileFactory.getUpdatedFilePath(changetoName);
-    return file.renameTo(new File(changetoName));
+  public boolean renameTo(String changeToName) {
+    changeToName = FileFactory.getUpdatedFilePath(changeToName);
+    return file.renameTo(new File(changeToName));
   }
 
   public boolean delete() {
@@ -256,7 +256,7 @@
       tempFile.renameForce(fileName);
       fileTruncatedSuccessfully = true;
     } catch (IOException e) {
-      LOGGER.error("Exception occured while truncating the file " + e.getMessage(), e);
+      LOGGER.error("Exception occurred while truncating the file " + e.getMessage(), e);
     } finally {
       CarbonUtil.closeStreams(source, destination);
     }
@@ -331,7 +331,7 @@
   }
 
   /**
-   * return the datainputStream which is seek to the offset of file
+   * return the DataInputStream which is positioned at the offset of the file
    *
    * @param bufferSize
    * @param offset
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/S3CarbonFile.java b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/S3CarbonFile.java
index c0e9aba..5136b8e 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/S3CarbonFile.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/S3CarbonFile.java
@@ -59,15 +59,15 @@
           Refer CARBONDATA-2670 for tracking this.
    */
   @Override
-  public boolean renameForce(String changetoName) {
+  public boolean renameForce(String changeToName) {
     try {
       // check if any file with the new name exists and delete it.
-      CarbonFile newCarbonFile = FileFactory.getCarbonFile(changetoName);
+      CarbonFile newCarbonFile = FileFactory.getCarbonFile(changeToName);
       newCarbonFile.delete();
       // rename the old file to the new name.
-      return fileSystem.rename(path, new Path(changetoName));
+      return fileSystem.rename(path, new Path(changeToName));
     } catch (IOException e) {
-      LOGGER.error("Exception occured: " + e.getMessage(), e);
+      LOGGER.error("Exception occurred: " + e.getMessage(), e);
       return false;
     }
   }
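renameForce above emulates an overwriting rename on stores whose plain rename does not replace an existing target: delete whatever sits at the new name, then rename. A sketch using only FileSystem calls that appear in the hunk, with a local file system and placeholder paths standing in for S3:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ForceRenameSketch {
  // Overwriting rename for stores whose rename does not replace the target:
  // drop the destination first, then rename the source onto it.
  static boolean renameForce(FileSystem fs, Path src, String changeToName) {
    try {
      Path target = new Path(changeToName);
      fs.delete(target, false);   // ignore result: target may not exist yet
      return fs.rename(src, target);
    } catch (IOException e) {
      return false;
    }
  }

  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    // illustrative local paths standing in for s3a:// URIs
    Path src = new Path("/tmp/carbon-src");
    fs.create(src).close();
    System.out.println(renameForce(fs, src, "/tmp/carbon-dst")); // true
  }
}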
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/ViewFSCarbonFile.java b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/ViewFSCarbonFile.java
index 4f90cd1..4b9294c 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/ViewFSCarbonFile.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/ViewFSCarbonFile.java
@@ -90,7 +90,7 @@
         return false;
       }
     } catch (IOException e) {
-      LOGGER.error("Exception occured" + e.getMessage(), e);
+      LOGGER.error("Exception occurred" + e.getMessage(), e);
       return false;
     }
   }
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileFactory.java b/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileFactory.java
index 6fe3242..1233b5f 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileFactory.java
@@ -175,7 +175,7 @@
   }
 
   /**
-   * Need carbonfile object path because depends on file format implementation
+   * Need carbon file object path because, depending on the file format implementation,
    * path will be formatted.
    */
   public static String getFormattedPath(String path) {
@@ -224,7 +224,7 @@
   }
 
   /**
-   * return the datainputStream which is seek to the offset of file
+   * return the DataInputStream which is positioned at the offset of the file
    *
    * @param path
    * @param bufferSize
@@ -354,7 +354,7 @@
   }
 
   /**
-   * for getting the dataoutput stream using the hdfs filesystem append API.
+   * for getting the DataOutputStream using the hdfs filesystem append API.
    *
    * @param path
    * @return
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileReaderImpl.java b/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileReaderImpl.java
index 4b2c368..9d7e2f1 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileReaderImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileReaderImpl.java
@@ -82,7 +82,7 @@
   }
 
   /**
-   * This method will be used to read int from file from postion(offset), here
+   * This method will be used to read int from file from position(offset), here
    * length will be always 4 because int byte size if 4
    *
    * @param filePath fully qualified file path
@@ -97,7 +97,7 @@
   }
 
   /**
-   * This method will be used to read int from file from postion(offset), here
+   * This method will be used to read int from file from position(offset), here
    * length will be always 4 because int byte size if 4
    *
    * @param filePath fully qualified file path
@@ -111,7 +111,7 @@
   }
 
   /**
-   * This method will be used to read int from file from postion(offset), here
+   * This method will be used to read int from file from position(offset), here
    * length will be always 4 because int byte size if 4
    *
    * @param filePath fully qualified file path
@@ -189,7 +189,7 @@
   }
 
   /**
-   * This method will be used to read long from file from postion(offset), here
+   * This method will be used to read long from file from position(offset), here
    * length will be always 8 because int byte size is 8
    *
    * @param filePath fully qualified file path
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileTypeInterface.java b/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileTypeInterface.java
index f9de81a..8127f26 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileTypeInterface.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileTypeInterface.java
@@ -39,7 +39,7 @@
    * Check if the FileSystem mapped with the given path is supported or not.
    *
    * @param path path of the file
-   * @return true if supported, fasle if not supported
+   * @return true if supported, false if not supported
    */
   public boolean isPathSupported(String path);
 }
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/ComplexColumnPage.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/ComplexColumnPage.java
index 0722ddf..45a5658 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/ComplexColumnPage.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/ComplexColumnPage.java
@@ -31,7 +31,7 @@
 import org.apache.carbondata.core.util.DataTypeUtil;
 
 /**
- * holds the complex columndata and its children data
+ * holds the complex column data and its children data
  */
 public class ComplexColumnPage {
 
@@ -63,7 +63,7 @@
   }
 
   /**
-   * below method will be used to initlize the column page of complex type
+   * below method will be used to initialize the column page of complex type
    * @param columnToDictMap dictionary map
    * @param pageSize number of records
    */
@@ -180,7 +180,7 @@
    * return the column page
    * @param complexColumnIndex
    * complexColumnIndex of column
-   * @return colum page
+   * @return column page
    */
   public ColumnPage getColumnPage(int complexColumnIndex) {
     assert (complexColumnIndex <= this.complexColumnIndex);
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/DecoderBasedFallbackEncoder.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/DecoderBasedFallbackEncoder.java
index 017e605..30d07f7 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/DecoderBasedFallbackEncoder.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/DecoderBasedFallbackEncoder.java
@@ -103,7 +103,7 @@
             encodedColumnPage.getActualPage().getPageSize());
 
     // uncompressed data from encoded column page is dictionary data, get the dictionary data using
-    // keygenerator
+    // KeyGenerator
     KeyGenerator keyGenerator = KeyGeneratorFactory
         .getKeyGenerator(new int[] { CarbonCommonConstants.LOCAL_DICTIONARY_MAX + 1 });
 
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/EncodedTablePage.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/EncodedTablePage.java
index 8a3482a..d379a52 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/EncodedTablePage.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/EncodedTablePage.java
@@ -33,7 +33,7 @@
   // number of row in this page
   private int pageSize;
 
-  // size in bytes of all encoded columns (including data and metadate)
+  // size in bytes of all encoded columns (including data and metadata)
   private int encodedSize;
 
   public static EncodedTablePage newInstance(int pageSize,
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/LocalDictColumnPage.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/LocalDictColumnPage.java
index 797dd11..05fed58 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/LocalDictColumnPage.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/LocalDictColumnPage.java
@@ -76,7 +76,7 @@
   /**
    * Create a new column page with input data type and page size.
    */
-  protected LocalDictColumnPage(ColumnPage actualDataColumnPage, ColumnPage encodedColumnpage,
+  protected LocalDictColumnPage(ColumnPage actualDataColumnPage, ColumnPage encodedColumnPage,
       LocalDictionaryGenerator localDictionaryGenerator, boolean isComplexTypePrimitive,
       boolean isDecoderBasedFallBackEnabled) {
     super(actualDataColumnPage.getColumnPageEncoderMeta(), actualDataColumnPage.getPageSize());
@@ -86,13 +86,13 @@
       pageLevelDictionary = new PageLevelDictionary(localDictionaryGenerator,
           actualDataColumnPage.getColumnSpec().getFieldName(), actualDataColumnPage.getDataType(),
           isComplexTypePrimitive, actualDataColumnPage.getColumnCompressorName());
-      this.encodedDataColumnPage = encodedColumnpage;
+      this.encodedDataColumnPage = encodedColumnPage;
       this.keyGenerator = KeyGeneratorFactory
           .getKeyGenerator(new int[] { CarbonCommonConstants.LOCAL_DICTIONARY_MAX + 1 });
       this.dummyKey = new int[1];
     } else {
       // else free the encoded column page memory as its of no use
-      encodedColumnpage.freeMemory();
+      encodedColumnPage.freeMemory();
     }
     this.isDecoderBasedFallBackEnabled = isDecoderBasedFallBackEnabled;
     this.actualDataColumnPage = actualDataColumnPage;
@@ -118,7 +118,7 @@
 
   /**
    * Below method will be used to check whether page is local dictionary
-   * generated or not. This will be used for while enoding the the page
+   * generated or not. This will be used while encoding the page
    *
    * @return
    */
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/SafeFixLengthColumnPage.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/SafeFixLengthColumnPage.java
index 23f6288..d6f5d0d 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/SafeFixLengthColumnPage.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/SafeFixLengthColumnPage.java
@@ -40,7 +40,7 @@
   private float[] floatData;
   private double[] doubleData;
   private byte[] shortIntData;
-  private byte[][] fixedLengthdata;
+  private byte[][] fixedLengthData;
   private int totalLength;
 
   // total number of entries in array
@@ -122,7 +122,7 @@
   @Override
   public void putBytes(int rowId, byte[] bytes) {
     ensureArraySize(rowId, DataTypes.BYTE_ARRAY);
-    this.fixedLengthdata[rowId] = bytes;
+    this.fixedLengthData[rowId] = bytes;
     arrayElementCount++;
     totalLength += bytes.length;
   }
@@ -218,7 +218,7 @@
 
   @Override
   public byte[] getBytes(int rowId) {
-    return this.fixedLengthdata[rowId];
+    return this.fixedLengthData[rowId];
   }
 
   /**
@@ -284,7 +284,7 @@
   public byte[][] getByteArrayPage() {
     byte[][] data = new byte[arrayElementCount][];
     for (int i = 0; i < arrayElementCount; i++) {
-      data[i] = fixedLengthdata[i];
+      data[i] = fixedLengthData[i];
     }
     return data;
   }
@@ -300,7 +300,7 @@
     ByteArrayOutputStream stream = new ByteArrayOutputStream();
     DataOutputStream out = new DataOutputStream(stream);
     for (int i = 0; i < arrayElementCount; i++) {
-      out.write(fixedLengthdata[i]);
+      out.write(fixedLengthData[i]);
     }
     return stream.toByteArray();
   }
@@ -384,7 +384,7 @@
     floatData = null;
     doubleData = null;
     shortIntData = null;
-    fixedLengthdata = null;
+    fixedLengthData = null;
   }
 
   /**
@@ -467,16 +467,16 @@
         doubleData = newArray;
       }
     } else if (dataType == DataTypes.BYTE_ARRAY) {
-      if (fixedLengthdata == null) {
-        fixedLengthdata = new byte[pageSize][];
+      if (fixedLengthData == null) {
+        fixedLengthData = new byte[pageSize][];
       }
-      if (requestSize >= fixedLengthdata.length) {
+      if (requestSize >= fixedLengthData.length) {
         byte[][] newArray = new byte[arrayElementCount * 2][];
         int index = 0;
-        for (byte[] data : fixedLengthdata) {
+        for (byte[] data : fixedLengthData) {
           newArray[index++] = data;
         }
-        fixedLengthdata = newArray;
+        fixedLengthData = newArray;
       }
     } else {
       throw new UnsupportedOperationException(
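The ensureArraySize hunk shows the growth policy behind the renamed fixedLengthData array: allocate lazily, then double the backing array when a rowId would overflow it. A generic sketch of that policy (hypothetical page class; Arrays.copyOf replaces the manual copy loop):

import java.util.Arrays;

public final class GrowableBytePage {
  private byte[][] fixedLengthData;
  private int arrayElementCount;

  void putBytes(int rowId, byte[] bytes) {
    if (fixedLengthData == null) {
      fixedLengthData = new byte[16][];   // initial capacity, illustrative
    }
    // double until the requested row fits, carrying existing entries over
    while (rowId >= fixedLengthData.length) {
      fixedLengthData = Arrays.copyOf(fixedLengthData, fixedLengthData.length * 2);
    }
    fixedLengthData[rowId] = bytes;
    arrayElementCount++;
  }

  public static void main(String[] args) {
    GrowableBytePage page = new GrowableBytePage();
    for (int i = 0; i < 100; i++) {
      page.putBytes(i, new byte[] {(byte) i});
    }
    System.out.println(page.arrayElementCount); // 100
  }
}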
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/ColumnPageEncoder.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/ColumnPageEncoder.java
index 182d0d4..7606a9c 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/ColumnPageEncoder.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/ColumnPageEncoder.java
@@ -133,14 +133,14 @@
 
   private List<ByteBuffer> buildEncoderMeta(ColumnPage inputPage) throws IOException {
     ColumnPageEncoderMeta meta = getEncoderMeta(inputPage);
-    List<ByteBuffer> metaDatas = new ArrayList<>();
+    List<ByteBuffer> metaData = new ArrayList<>();
     if (meta != null) {
       ByteArrayOutputStream stream = new ByteArrayOutputStream();
       DataOutputStream out = new DataOutputStream(stream);
       meta.write(out);
-      metaDatas.add(ByteBuffer.wrap(stream.toByteArray()));
+      metaData.add(ByteBuffer.wrap(stream.toByteArray()));
     }
-    return metaDatas;
+    return metaData;
   }
 
   private void fillMinMaxIndex(ColumnPage inputPage, DataChunk2 dataChunk) {
@@ -176,8 +176,8 @@
    * `buildPageMetadata` will call this for backward compatibility
    */
   protected void fillLegacyFields(DataChunk2 dataChunk) {
-    // Subclass should override this to update datachunk2 if any backward compatibility if required,
-    // For example, when using IndexStorageCodec, rle_page_length and rowid_page_length need to be
+    // Subclass should override this to update DataChunk2 if any backward compatibility is required,
+    // For example, when using IndexStorageCodec, rle_page_length and rowId_page_length need to be
     // updated
   }
 
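buildEncoderMeta above serializes optional page metadata through a DataOutputStream and wraps the resulting bytes in a ByteBuffer. A compact sketch of the same round-trip, with MetaWriter as a hypothetical stand-in for ColumnPageEncoderMeta:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

public final class MetaBufferSketch {
  interface MetaWriter {
    void write(DataOutputStream out) throws IOException;
  }

  // Serialize the metadata, if any, into a single wrapped ByteBuffer.
  static List<ByteBuffer> buildMeta(MetaWriter meta) throws IOException {
    List<ByteBuffer> metaData = new ArrayList<>();
    if (meta != null) {
      ByteArrayOutputStream stream = new ByteArrayOutputStream();
      DataOutputStream out = new DataOutputStream(stream);
      meta.write(out);
      metaData.add(ByteBuffer.wrap(stream.toByteArray()));
    }
    return metaData;
  }

  public static void main(String[] args) throws IOException {
    List<ByteBuffer> buffers = buildMeta(out -> out.writeInt(42));
    System.out.println(buffers.get(0).remaining()); // 4
  }
}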
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/ColumnPageEncoderMeta.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/ColumnPageEncoderMeta.java
index f04d38a..a1e3765 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/ColumnPageEncoderMeta.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/ColumnPageEncoderMeta.java
@@ -49,7 +49,7 @@
   // Make it protected for RLEEncoderMeta
   protected String compressorName;
 
-  // Whether the flow shoild go to fill complete vector while decoding the page.
+  // Whether the flow should fill the complete vector while decoding the page.
   private transient boolean fillCompleteVector;
 
   public ColumnPageEncoderMeta() {
@@ -58,7 +58,7 @@
   public ColumnPageEncoderMeta(TableSpec.ColumnSpec columnSpec, DataType storeDataType,
       String compressorName) {
     if (columnSpec == null) {
-      throw new IllegalArgumentException("columm spec must not be null");
+      throw new IllegalArgumentException("column spec must not be null");
     }
     if (storeDataType == null) {
       throw new IllegalArgumentException("store data type must not be null");
@@ -199,7 +199,7 @@
       byte[] min = new byte[in.readShort()];
       in.readFully(min);
       this.setMinValue(DataTypeUtil.byteToBigDecimal(min));
-      // unique value is obsoleted, maintain for compatiability
+      // unique value is obsoleted, maintain for compatibility
       short uniqueLength = in.readShort();
       in.readFully(new byte[uniqueLength]);
       // scale field is obsoleted. It is stored in the schema data type in columnSpec
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/adaptive/AdaptiveDeltaIntegralCodec.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/adaptive/AdaptiveDeltaIntegralCodec.java
index 573c225..67e384a 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/adaptive/AdaptiveDeltaIntegralCodec.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/adaptive/AdaptiveDeltaIntegralCodec.java
@@ -42,7 +42,7 @@
 import org.apache.carbondata.core.scan.result.vector.CarbonColumnVector;
 import org.apache.carbondata.core.scan.result.vector.ColumnVectorInfo;
 import org.apache.carbondata.core.scan.result.vector.impl.directread.ColumnarVectorWrapperDirectFactory;
-import org.apache.carbondata.core.scan.result.vector.impl.directread.ConvertableVector;
+import org.apache.carbondata.core.scan.result.vector.impl.directread.ConvertibleVector;
 import org.apache.carbondata.core.scan.result.vector.impl.directread.SequentialFill;
 import org.apache.carbondata.core.util.ByteUtil;
 import org.apache.carbondata.format.DataChunk2;
@@ -325,8 +325,8 @@
           vector.putNull(i);
         }
       }
-      if (vector instanceof ConvertableVector) {
-        ((ConvertableVector) vector).convert();
+      if (vector instanceof ConvertibleVector) {
+        ((ConvertibleVector) vector).convert();
       }
     }
 
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/adaptive/AdaptiveIntegralCodec.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/adaptive/AdaptiveIntegralCodec.java
index 3554cd3..40b3331 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/adaptive/AdaptiveIntegralCodec.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/adaptive/AdaptiveIntegralCodec.java
@@ -41,7 +41,7 @@
 import org.apache.carbondata.core.scan.result.vector.CarbonColumnVector;
 import org.apache.carbondata.core.scan.result.vector.ColumnVectorInfo;
 import org.apache.carbondata.core.scan.result.vector.impl.directread.ColumnarVectorWrapperDirectFactory;
-import org.apache.carbondata.core.scan.result.vector.impl.directread.ConvertableVector;
+import org.apache.carbondata.core.scan.result.vector.impl.directread.ConvertibleVector;
 import org.apache.carbondata.core.scan.result.vector.impl.directread.SequentialFill;
 import org.apache.carbondata.core.util.ByteUtil;
 import org.apache.carbondata.format.DataChunk2;
@@ -299,8 +299,8 @@
           vector.putNull(i);
         }
       }
-      if (vector instanceof ConvertableVector) {
-        ((ConvertableVector) vector).convert();
+      if (vector instanceof ConvertibleVector) {
+        ((ConvertibleVector) vector).convert();
       }
 
     }
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/compress/DirectCompressCodec.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/compress/DirectCompressCodec.java
index 5fff9c2..9d47886 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/compress/DirectCompressCodec.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/compress/DirectCompressCodec.java
@@ -42,7 +42,7 @@
 import org.apache.carbondata.core.scan.result.vector.CarbonColumnVector;
 import org.apache.carbondata.core.scan.result.vector.ColumnVectorInfo;
 import org.apache.carbondata.core.scan.result.vector.impl.directread.ColumnarVectorWrapperDirectFactory;
-import org.apache.carbondata.core.scan.result.vector.impl.directread.ConvertableVector;
+import org.apache.carbondata.core.scan.result.vector.impl.directread.ConvertibleVector;
 import org.apache.carbondata.core.scan.result.vector.impl.directread.SequentialFill;
 import org.apache.carbondata.core.util.ByteUtil;
 import org.apache.carbondata.format.Encoding;
@@ -253,8 +253,8 @@
           vector.putNull(i);
         }
       }
-      if (vector instanceof ConvertableVector) {
-        ((ConvertableVector) vector).convert();
+      if (vector instanceof ConvertibleVector) {
+        ((ConvertibleVector) vector).convert();
       }
     }
 
diff --git a/core/src/main/java/org/apache/carbondata/core/enums/EscapeSequences.java b/core/src/main/java/org/apache/carbondata/core/enums/EscapeSequences.java
index 4bbdddf..fe6e01a 100644
--- a/core/src/main/java/org/apache/carbondata/core/enums/EscapeSequences.java
+++ b/core/src/main/java/org/apache/carbondata/core/enums/EscapeSequences.java
@@ -27,7 +27,7 @@
   private String name;
 
   /**
-   * unicode of the escapechar
+   * unicode of the escape char
    */
   private char escapeChar;
 
diff --git a/core/src/main/java/org/apache/carbondata/core/index/IndexChooser.java b/core/src/main/java/org/apache/carbondata/core/index/IndexChooser.java
index 4c8bcc5..06cd079 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/IndexChooser.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/IndexChooser.java
@@ -43,7 +43,7 @@
 
 /**
  * This chooser does 2 jobs.
- * 1. Based on filter expression it converts the available Indexs to Index expression.
+ * 1. Based on filter expression it converts the available Indexes to Index expression.
  *   For example, there are 2 Indexes available on table1
  *   Index1 : column1
  *   Index2 : column2
diff --git a/core/src/main/java/org/apache/carbondata/core/index/IndexFilter.java b/core/src/main/java/org/apache/carbondata/core/index/IndexFilter.java
index 758d30d..cbb41c1 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/IndexFilter.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/IndexFilter.java
@@ -36,7 +36,7 @@
 import org.apache.carbondata.core.scan.expression.logical.AndExpression;
 import org.apache.carbondata.core.scan.filter.FilterExpressionProcessor;
 import org.apache.carbondata.core.scan.filter.intf.FilterOptimizer;
-import org.apache.carbondata.core.scan.filter.optimizer.RangeFilterOptmizer;
+import org.apache.carbondata.core.scan.filter.optimizer.RangeFilterOptimizer;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 import org.apache.carbondata.core.scan.model.QueryModel;
 import org.apache.carbondata.core.util.ObjectSerializationUtil;
@@ -231,7 +231,7 @@
     processFilterExpressionWithoutRange(isFilterDimensions, isFilterMeasures);
     if (null != expression) {
       // Optimize Filter Expression and fit RANGE filters is conditions apply.
-      FilterOptimizer rangeFilterOptimizer = new RangeFilterOptmizer(expression);
+      FilterOptimizer rangeFilterOptimizer = new RangeFilterOptimizer(expression);
       rangeFilterOptimizer.optimizeFilter();
     }
   }
diff --git a/core/src/main/java/org/apache/carbondata/core/index/IndexInputFormat.java b/core/src/main/java/org/apache/carbondata/core/index/IndexInputFormat.java
index cd82345..a6e02527 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/IndexInputFormat.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/IndexInputFormat.java
@@ -93,7 +93,7 @@
 
   private boolean isCountStarJob = false;
 
-  // Whether AsyncCall to the Index Server(true in the case of prepriming)
+  // Whether AsyncCall to the Index Server (true in the case of pre-priming)
   private boolean isAsyncCall;
 
   IndexInputFormat() {
@@ -128,11 +128,11 @@
 
   @Override
   public List<InputSplit> getSplits(JobContext job) throws IOException {
-    List<IndexInputSplitWrapper> distributables;
-    distributables =
+    List<IndexInputSplitWrapper> distributableList;
+    distributableList =
         IndexChooser.getDefaultIndex(table, filterResolverIntf).toDistributable(validSegments);
-    List<InputSplit> inputSplits = new ArrayList<>(distributables.size());
-    inputSplits.addAll(distributables);
+    List<InputSplit> inputSplits = new ArrayList<>(distributableList.size());
+    inputSplits.addAll(distributableList);
     return inputSplits;
   }
 
@@ -356,19 +356,19 @@
   *  then need to cut as transferring big query to IndexServer will be costly.
   */
   public void setTaskGroupDesc(String taskGroupDesc) {
-    int maxJobLenth;
+    int maxJobLength;
     try {
-      String maxJobLenthString = CarbonProperties.getInstance()
+      String maxJobLengthString = CarbonProperties.getInstance()
               .getProperty(CarbonCommonConstants.CARBON_INDEX_SERVER_JOBNAME_LENGTH ,
                       CarbonCommonConstants.CARBON_INDEX_SERVER_JOBNAME_LENGTH_DEFAULT);
-      maxJobLenth = Integer.parseInt(maxJobLenthString);
+      maxJobLength = Integer.parseInt(maxJobLengthString);
     } catch (Exception e) {
-      String maxJobLenthString = CarbonProperties.getInstance()
+      String maxJobLengthString = CarbonProperties.getInstance()
               .getProperty(CarbonCommonConstants.CARBON_INDEX_SERVER_JOBNAME_LENGTH_DEFAULT);
-      maxJobLenth = Integer.parseInt(maxJobLenthString);
+      maxJobLength = Integer.parseInt(maxJobLengthString);
     }
-    if (taskGroupDesc.length() > maxJobLenth) {
-      this.taskGroupDesc = taskGroupDesc.substring(0, maxJobLenth);
+    if (taskGroupDesc.length() > maxJobLength) {
+      this.taskGroupDesc = taskGroupDesc.substring(0, maxJobLength);
     } else {
       this.taskGroupDesc = taskGroupDesc;
     }
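setTaskGroupDesc above bounds the job name sent to the Index Server: parse the configured limit, fall back to the default on any parse failure, and truncate. A sketch of that guard; the 48-character default here is illustrative, the real one lives in CarbonCommonConstants:

public final class JobNameTruncation {
  static final String DEFAULT_MAX_LENGTH = "48";   // illustrative default

  static String truncate(String desc, String configuredMax) {
    int maxJobLength;
    try {
      maxJobLength = Integer.parseInt(configuredMax);
    } catch (Exception e) {
      // any bad or missing configuration falls back to the default
      maxJobLength = Integer.parseInt(DEFAULT_MAX_LENGTH);
    }
    return desc.length() > maxJobLength ? desc.substring(0, maxJobLength) : desc;
  }

  public static void main(String[] args) {
    System.out.println(truncate("select * from t where ...", "10")); // first 10 chars
    System.out.println(truncate("short query", "oops"));             // unchanged
  }
}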
diff --git a/core/src/main/java/org/apache/carbondata/core/index/IndexJob.java b/core/src/main/java/org/apache/carbondata/core/index/IndexJob.java
index a89a661..608f989 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/IndexJob.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/IndexJob.java
@@ -28,7 +28,7 @@
 
 /**
  * Distributable index job to execute the #IndexInputFormat in cluster. it prunes the
- * indexes distributably and returns the final blocklet list
+ * indexes in a distributed manner and returns the final blocklet list
  */
 public interface IndexJob extends Serializable {
 
diff --git a/core/src/main/java/org/apache/carbondata/core/index/IndexStoreManager.java b/core/src/main/java/org/apache/carbondata/core/index/IndexStoreManager.java
index f38c33f..2a69e17 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/IndexStoreManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/IndexStoreManager.java
@@ -72,7 +72,7 @@
   private Map<String, List<TableIndex>> allIndexes = new ConcurrentHashMap<>();
 
   /**
-   * Contains the table name to the tablepath mapping.
+   * Contains the table name to the table path mapping.
    */
   private Map<String, String> tablePathMap = new ConcurrentHashMap<>();
 
@@ -135,7 +135,7 @@
         tableIndices = allIndexes.get(tableId);
       }
     }
-    // in case of fileformat or sdk, when table is dropped or schema is changed the indexes are
+    // in case of file format or sdk, when table is dropped or schema is changed the indexes are
     // not cleared, they need to be cleared by using API, so compare the columns, if not same, clear
     // the indexes on that table
     if (allIndexes.size() > 0 && !CollectionUtils.isEmpty(allIndexes.get(tableId))
@@ -595,7 +595,7 @@
   }
 
   private boolean hasCGIndex(CarbonTable carbonTable) throws IOException {
-    // In case of spark file format flow, carbontable will be null
+    // In case of spark file format flow, carbon table will be null
     if (null == carbonTable) {
       return false;
     }
diff --git a/core/src/main/java/org/apache/carbondata/core/index/TableIndex.java b/core/src/main/java/org/apache/carbondata/core/index/TableIndex.java
index 7aa5645..0da8f79 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/TableIndex.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/TableIndex.java
@@ -53,7 +53,7 @@
 import org.apache.carbondata.core.metadata.schema.table.IndexSchema;
 import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.filter.FilterUtil;
-import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.core.scan.filter.executer.FilterExecutor;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.events.Event;
@@ -114,14 +114,14 @@
   /**
    * Pass the valid segments and prune the index using filter expression
    *
-   * @param allsegments
+   * @param allSegments
    * @param filter
    * @return
    */
-  public List<ExtendedBlocklet> prune(List<Segment> allsegments, final IndexFilter filter,
+  public List<ExtendedBlocklet> prune(List<Segment> allSegments, final IndexFilter filter,
       final List<PartitionSpec> partitions) throws IOException {
     final List<ExtendedBlocklet> blocklets = new ArrayList<>();
-    List<Segment> segments = getCarbonSegments(allsegments);
+    List<Segment> segments = getCarbonSegments(allSegments);
     final Map<Segment, List<Index>> indexes;
     boolean isFilterPresent = filter != null && !filter.isEmpty();
     Set<Path> partitionLocations = getPartitionLocations(partitions);
@@ -170,9 +170,9 @@
     return extendedBlocklets;
   }
 
-  private List<Segment> getCarbonSegments(List<Segment> allsegments) {
+  private List<Segment> getCarbonSegments(List<Segment> allSegments) {
     List<Segment> segments = new ArrayList<>();
-    for (Segment segment : allsegments) {
+    for (Segment segment : allSegments) {
       if (segment.isCarbonSegment()) {
         segments.add(segment);
       }
@@ -215,46 +215,45 @@
       SegmentProperties segmentProperties =
           segmentPropertiesFetcher.getSegmentProperties(segment, partitionLocations);
       if (filter.isResolvedOnSegment(segmentProperties)) {
-        FilterExecuter filterExecuter;
+        FilterExecutor filterExecutor;
         if (!isExternalSegment) {
-          filterExecuter = FilterUtil
-              .getFilterExecuterTree(filter.getResolver(), segmentProperties, null,
+          filterExecutor = FilterUtil
+              .getFilterExecutorTree(filter.getResolver(), segmentProperties, null,
                   table.getMinMaxCacheColumns(segmentProperties), false);
         } else {
-          filterExecuter = FilterUtil
-              .getFilterExecuterTree(filter.getExternalSegmentResolver(), segmentProperties, null,
+          filterExecutor = FilterUtil
+              .getFilterExecutorTree(filter.getExternalSegmentResolver(), segmentProperties, null,
                   table.getMinMaxCacheColumns(segmentProperties), false);
         }
         for (Index index : indexes.get(segment)) {
           if (!isExternalSegment) {
             pruneBlocklets.addAll(index
-                .prune(filter.getResolver(), segmentProperties, filterExecuter, this.table));
+                .prune(filter.getResolver(), segmentProperties, filterExecutor, this.table));
           } else {
             pruneBlocklets.addAll(index
-                .prune(filter.getExternalSegmentResolver(), segmentProperties, filterExecuter,
+                .prune(filter.getExternalSegmentResolver(), segmentProperties, filterExecutor,
                     this.table));
           }
         }
       } else {
-        FilterExecuter filterExecuter;
+        FilterExecutor filterExecutor;
         Expression expression = filter.getExpression();
         if (!isExternalSegment) {
-          filterExecuter = FilterUtil.getFilterExecuterTree(
+          filterExecutor = FilterUtil.getFilterExecutorTree(
               new IndexFilter(segmentProperties, table, expression).getResolver(),
               segmentProperties, null, table.getMinMaxCacheColumns(segmentProperties), false);
         } else {
-          filterExecuter = FilterUtil.getFilterExecuterTree(
+          filterExecutor = FilterUtil.getFilterExecutorTree(
               new IndexFilter(segmentProperties, table, expression).getExternalSegmentResolver(),
               segmentProperties, null, table.getMinMaxCacheColumns(segmentProperties), false);
         }
         for (Index index : indexes.get(segment)) {
           if (!isExternalSegment) {
-            pruneBlocklets.addAll(index
-                .prune(filter.getExpression(), segmentProperties, table, filterExecuter));
+            pruneBlocklets.addAll(index.prune(
+                filter.getExpression(), segmentProperties, table, filterExecutor));
           } else {
-            pruneBlocklets.addAll(index
-                .prune(filter.getExternalSegmentFilter(), segmentProperties, table,
-                    filterExecuter));
+            pruneBlocklets.addAll(index.prune(
+                filter.getExternalSegmentFilter(), segmentProperties, table, filterExecutor));
           }
         }
       }
@@ -324,8 +323,8 @@
         }
       }
       if (prev == 0 || prev != eachSegmentIndexList.size()) {
-        // if prev == 0. Add a segment's all indexess
-        // eachSegmentIndexList.size() != prev, adding the last remaining indexess of this segment
+        // if prev == 0, add all indexes of a segment
+        // eachSegmentIndexList.size() != prev, adding the last remaining indexes of this segment
         segmentIndexGroupList
             .add(new SegmentIndexGroup(segment, prev, eachSegmentIndexList.size() - 1));
       }
@@ -338,9 +337,9 @@
       throw new RuntimeException(" not all the files processed ");
     }
     if (indexListForEachThread.size() < numOfThreadsForPruning) {
-      // If the total indexess fitted in lesser number of threads than numOfThreadsForPruning.
-      // Launch only that many threads where indexess are fitted while grouping.
-      LOG.info("indexess is distributed in " + indexListForEachThread.size() + " threads");
+      // If the total indexes fit into fewer threads than numOfThreadsForPruning,
+      // launch only that many threads where the indexes were fitted while grouping.
+      LOG.info("indexes are distributed in " + indexListForEachThread.size() + " threads");
       numOfThreadsForPruning = indexListForEachThread.size();
     }
     LOG.info(
@@ -365,14 +364,14 @@
             Segment segment = segmentIndexGroup.getSegment();
             boolean isExternalSegment = segment.getSegmentPath() != null;
             if (filter.isResolvedOnSegment(segmentProperties)) {
-              FilterExecuter filterExecuter;
+              FilterExecutor filterExecutor;
               if (!isExternalSegment) {
-                filterExecuter = FilterUtil
-                    .getFilterExecuterTree(filter.getResolver(), segmentProperties, null,
+                filterExecutor = FilterUtil
+                    .getFilterExecutorTree(filter.getResolver(), segmentProperties, null,
                         table.getMinMaxCacheColumns(segmentProperties), false);
               } else {
-                filterExecuter = FilterUtil
-                    .getFilterExecuterTree(filter.getExternalSegmentResolver(), segmentProperties,
+                filterExecutor = FilterUtil
+                    .getFilterExecutorTree(filter.getExternalSegmentResolver(), segmentProperties,
                         null, table.getMinMaxCacheColumns(segmentProperties), false);
               }
               for (int i = segmentIndexGroup.getFromIndex();
@@ -380,10 +379,10 @@
                 List<Blocklet> dmPruneBlocklets;
                 if (!isExternalSegment) {
                   dmPruneBlocklets = indexList.get(i)
-                      .prune(filter.getResolver(), segmentProperties, filterExecuter, table);
+                      .prune(filter.getResolver(), segmentProperties, filterExecutor, table);
                 } else {
                   dmPruneBlocklets = indexList.get(i)
-                      .prune(filter.getExternalSegmentResolver(), segmentProperties, filterExecuter,
+                      .prune(filter.getExternalSegmentResolver(), segmentProperties, filterExecutor,
                           table);
                 }
                 pruneBlocklets.addAll(addSegmentId(
@@ -392,13 +391,13 @@
               }
             } else {
               Expression filterExpression = filter.getNewCopyOfExpression();
-              FilterExecuter filterExecuter;
+              FilterExecutor filterExecutor;
               if (!isExternalSegment) {
-                filterExecuter = FilterUtil.getFilterExecuterTree(
+                filterExecutor = FilterUtil.getFilterExecutorTree(
                     new IndexFilter(segmentProperties, table, filterExpression).getResolver(),
                     segmentProperties, null, table.getMinMaxCacheColumns(segmentProperties), false);
               } else {
-                filterExecuter = FilterUtil.getFilterExecuterTree(
+                filterExecutor = FilterUtil.getFilterExecutorTree(
                     new IndexFilter(segmentProperties, table, filterExpression)
                         .getExternalSegmentResolver(), segmentProperties, null,
                     table.getMinMaxCacheColumns(segmentProperties), false);
@@ -408,11 +407,11 @@
                 List<Blocklet> dmPruneBlocklets;
                 if (!isExternalSegment) {
                   dmPruneBlocklets = indexList.get(i)
-                      .prune(filterExpression, segmentProperties, table, filterExecuter);
+                      .prune(filterExpression, segmentProperties, table, filterExecutor);
                 } else {
                   dmPruneBlocklets = indexList.get(i)
                       .prune(filter.getExternalSegmentFilter(), segmentProperties, table,
-                          filterExecuter);
+                          filterExecutor);
                 }
                 pruneBlocklets.addAll(addSegmentId(
                     blockletDetailsFetcher.getExtendedBlocklets(dmPruneBlocklets, segment),
@@ -463,18 +462,18 @@
 
   /**
    * This is used for making the index distributable.
-   * It takes the valid segments and returns all the indexess as distributable objects so that
+   * It takes the valid segments and returns all the indexes as distributable objects so that
    * it can be distributed across machines.
    *
    * @return
    */
-  public List<IndexInputSplit> toDistributable(List<Segment> allsegments) {
-    List<IndexInputSplit> distributables = new ArrayList<>();
-    List<Segment> segments = getCarbonSegments(allsegments);
+  public List<IndexInputSplit> toDistributable(List<Segment> allSegments) {
+    List<IndexInputSplit> distributableList = new ArrayList<>();
+    List<Segment> segments = getCarbonSegments(allSegments);
     for (Segment segment : segments) {
-      distributables.addAll(indexFactory.toDistributable(segment));
+      distributableList.addAll(indexFactory.toDistributable(segment));
     }
-    return distributables;
+    return distributableList;
   }
 
   public IndexInputSplitWrapper toDistributableSegment(Segment segment, String uniqueId)
@@ -483,7 +482,7 @@
   }
 
   /**
-   * This method returns all the indexess corresponding to the distributable object
+   * This method returns all the indexes corresponding to the distributable object
    *
    * @param distributable
    * @return
@@ -508,12 +507,12 @@
     Set<Path> partitionsToPrune = getPartitionLocations(partitions);
     SegmentProperties segmentProperties = segmentPropertiesFetcher
         .getSegmentProperties(distributable.getSegment(), partitionsToPrune);
-    FilterExecuter filterExecuter = FilterUtil
-        .getFilterExecuterTree(filterExp, segmentProperties,
+    FilterExecutor filterExecutor = FilterUtil
+        .getFilterExecutorTree(filterExp, segmentProperties,
             null, table.getMinMaxCacheColumns(segmentProperties),
             false);
     for (Index index : indices) {
-      blocklets.addAll(index.prune(filterExp, segmentProperties, filterExecuter, table));
+      blocklets.addAll(index.prune(filterExp, segmentProperties, filterExecutor, table));
     }
     BlockletSerializer serializer = new BlockletSerializer();
     String writePath =
@@ -526,10 +525,10 @@
       ExtendedBlocklet detailedBlocklet = blockletDetailsFetcher
           .getExtendedBlocklet(blocklet, distributable.getSegment());
       if (indexFactory.getIndexLevel() == IndexLevel.FG) {
-        String blockletwritePath =
+        String blockletWritePath =
             writePath + CarbonCommonConstants.FILE_SEPARATOR + System.nanoTime();
-        detailedBlocklet.setIndexWriterPath(blockletwritePath);
-        serializer.serializeBlocklet((FineGrainBlocklet) blocklet, blockletwritePath);
+        detailedBlocklet.setIndexWriterPath(blockletWritePath);
+        serializer.serializeBlocklet((FineGrainBlocklet) blocklet, blockletWritePath);
       }
       detailedBlocklet.setSegment(distributable.getSegment());
       detailedBlocklets.add(detailedBlocklet);
@@ -538,7 +537,7 @@
   }
 
   /**
-   * Clear only the indexess of the segments
+   * Clear only the indexes of the segments
    * @param segmentIds list of segmentIds to be cleared from cache.
    */
   public void clear(List<String> segmentIds) {
@@ -559,8 +558,8 @@
   /**
    * delete only the index of the segments
    */
-  public void deleteIndexData(List<Segment> allsegments) throws IOException {
-    List<Segment> segments = getCarbonSegments(allsegments);
+  public void deleteIndexData(List<Segment> allSegments) throws IOException {
+    List<Segment> segments = getCarbonSegments(allSegments);
     for (Segment segment: segments) {
       indexFactory.deleteIndexData(segment);
     }
@@ -596,15 +595,15 @@
   /**
    * Prune the index of the given segments and return the Map of blocklet path and row count
    *
-   * @param allsegments
+   * @param allSegments
    * @param partitions
    * @return
    * @throws IOException
    */
-  public Map<String, Long> getBlockRowCount(List<Segment> allsegments,
+  public Map<String, Long> getBlockRowCount(List<Segment> allSegments,
       final List<PartitionSpec> partitions, TableIndex defaultIndex)
       throws IOException {
-    List<Segment> segments = getCarbonSegments(allsegments);
+    List<Segment> segments = getCarbonSegments(allSegments);
     Map<String, Long> blockletToRowCountMap = new HashMap<>();
     for (Segment segment : segments) {
       List<CoarseGrainIndex> indexes = defaultIndex.getIndexFactory().getIndexes(segment);
@@ -624,11 +623,11 @@
 
   /**
    * Get the mapping of blocklet path and row count for all blocks. This method skips the
-   * validation of partition info for countStar job with indexserver enabled.
+   * validation of partition info for countStar job with index server enabled.
    */
-  public Map<String, Long> getBlockRowCount(TableIndex defaultIndex, List<Segment> allsegments,
+  public Map<String, Long> getBlockRowCount(TableIndex defaultIndex, List<Segment> allSegments,
       final List<PartitionSpec> partitions) throws IOException {
-    List<Segment> segments = getCarbonSegments(allsegments);
+    List<Segment> segments = getCarbonSegments(allSegments);
     Map<String, Long> blockletToRowCountMap = new HashMap<>();
     for (Segment segment : segments) {
       List<CoarseGrainIndex> indexes = defaultIndex.getIndexFactory().getIndexes(segment);
@@ -642,14 +641,14 @@
   /**
    * Prune the index of the given segments and return the Map of blocklet path and row count
    *
-   * @param allsegments
+   * @param allSegments
    * @param partitions
    * @return
    * @throws IOException
    */
-  public long getRowCount(List<Segment> allsegments, final List<PartitionSpec> partitions,
+  public long getRowCount(List<Segment> allSegments, final List<PartitionSpec> partitions,
       TableIndex defaultIndex) throws IOException {
-    List<Segment> segments = getCarbonSegments(allsegments);
+    List<Segment> segments = getCarbonSegments(allSegments);
     long totalRowCount = 0L;
     for (Segment segment : segments) {
       List<CoarseGrainIndex> indexes = defaultIndex.getIndexFactory().getIndexes(segment);
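The pruning loops above all share one shape: build the renamed FilterExecutor once per segment, then reuse it for every index of that segment. A sketch of that shape with hypothetical stand-in types:

import java.util.ArrayList;
import java.util.List;

public final class PruneLoopSketch {
  interface FilterExecutor { boolean matches(String blocklet); }
  interface Index { List<String> prune(FilterExecutor executor); }

  // Build the executor once per segment and reuse it across that
  // segment's indexes, mirroring the loop structure above.
  static List<String> prune(List<List<Index>> indexesPerSegment) {
    List<String> pruned = new ArrayList<>();
    for (List<Index> segmentIndexes : indexesPerSegment) {
      FilterExecutor executor = blocklet -> blocklet.startsWith("keep");
      for (Index index : segmentIndexes) {
        pruned.addAll(index.prune(executor));
      }
    }
    return pruned;
  }

  public static void main(String[] args) {
    Index index = executor -> {
      List<String> out = new ArrayList<>();
      for (String b : new String[] {"keep-0", "drop-1"}) {
        if (executor.matches(b)) {
          out.add(b);
        }
      }
      return out;
    };
    System.out.println(prune(List.of(List.of(index)))); // [keep-0]
  }
}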
diff --git a/core/src/main/java/org/apache/carbondata/core/index/dev/CacheableIndex.java b/core/src/main/java/org/apache/carbondata/core/index/dev/CacheableIndex.java
index 435dc2a..903bef2 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/dev/CacheableIndex.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/dev/CacheableIndex.java
@@ -44,15 +44,15 @@
       BlockletIndexWrapper blockletIndexWrapper) throws IOException;
 
   /**
-   * Get all the uncached distributables from the list.
+   * Get all the uncached input splits from the list.
    *
-   * @param distributables
+   * @param distributableList
    * @return
    */
-  List<IndexInputSplit> getAllUncachedDistributables(List<IndexInputSplit> distributables)
+  List<IndexInputSplit> getAllUncached(List<IndexInputSplit> distributableList)
       throws IOException;
 
-  List<IndexInputSplit> getAllUncachedDistributables(
+  List<IndexInputSplit> getAllUncached(
       List<Segment> segments, IndexExprWrapper indexExprWrapper) throws IOException;
 
   void updateSegmentIndex(
diff --git a/core/src/main/java/org/apache/carbondata/core/index/dev/Index.java b/core/src/main/java/org/apache/carbondata/core/index/dev/Index.java
index 0270ab1..fb5a792 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/dev/Index.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/dev/Index.java
@@ -28,7 +28,7 @@
 import org.apache.carbondata.core.indexstore.PartitionSpec;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 import org.apache.carbondata.core.scan.expression.Expression;
-import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.core.scan.filter.executer.FilterExecutor;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 
 /**
@@ -47,24 +47,24 @@
    * It returns the list of blocklets where these filters can exist.
    */
   List<T> prune(FilterResolverIntf filterExp, SegmentProperties segmentProperties,
-      FilterExecuter filterExecuter, CarbonTable table) throws IOException;
+      FilterExecutor filterExecutor, CarbonTable table) throws IOException;
 
   /**
    * Prune the table with filter expression. It returns the list of
    * blocklets where these filters can exist.
    */
   List<T> prune(Expression filter, SegmentProperties segmentProperties,
-      CarbonTable carbonTable, FilterExecuter filterExecuter);
+      CarbonTable carbonTable, FilterExecutor filterExecutor);
 
   /**
    * Prune the indexes for finding the row count. It returns a Map of
-   * blockletpath and the row count
+   * blocklet path and the row count
    */
   long getRowCount(Segment segment, List<PartitionSpec> partitions);
 
   /**
    * Prune the indexes for finding the row count for each block. It returns a Map of
-   * blockletpath and the row count
+   * blocklet path and the row count
    */
   Map<String, Long> getRowCountForEachBlock(Segment segment, List<PartitionSpec> partitions,
       Map<String, Long> blockletToRowCountMap);
@@ -93,7 +93,7 @@
   /**
    * Returns number of records information that are stored in Index.
    * Driver multi-thread block pruning happens based on the number of rows in Index.
-   * So Indexs can have multiple rows if they store information of multiple files.
+   * So Indexes can have multiple rows if they store information of multiple files.
    * so, this number of entries is used to represent how many files information a Index contains
    */
   int getNumberOfEntries();
diff --git a/core/src/main/java/org/apache/carbondata/core/index/dev/IndexFactory.java b/core/src/main/java/org/apache/carbondata/core/index/dev/IndexFactory.java
index a014102..6299a73 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/dev/IndexFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/dev/IndexFactory.java
@@ -181,7 +181,7 @@
 
   /**
    * whether to block operation on corresponding table or column.
-   * For example, bloomfilter index will block changing datatype for bloomindex column.
+   * For example, bloom filter index will block changing datatype for bloom index column.
    * By default it will not block any operation.
    *
    * @param operation table operation
diff --git a/core/src/main/java/org/apache/carbondata/core/index/dev/cgindex/CoarseGrainIndex.java b/core/src/main/java/org/apache/carbondata/core/index/dev/cgindex/CoarseGrainIndex.java
index 3fb2d99..8072d8b 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/dev/cgindex/CoarseGrainIndex.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/dev/cgindex/CoarseGrainIndex.java
@@ -29,7 +29,7 @@
 import org.apache.carbondata.core.indexstore.PartitionSpec;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 import org.apache.carbondata.core.scan.expression.Expression;
-import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.core.scan.filter.executer.FilterExecutor;
 
 /**
  * Index for Coarse Grain level, see {@link org.apache.carbondata.core.index.IndexLevel#CG}
@@ -40,7 +40,7 @@
 
   @Override
   public List<Blocklet> prune(Expression expression, SegmentProperties segmentProperties,
-      CarbonTable carbonTable, FilterExecuter filterExecuter) {
+      CarbonTable carbonTable, FilterExecutor filterExecutor) {
     throw new UnsupportedOperationException("Filter expression not supported");
   }
 
diff --git a/core/src/main/java/org/apache/carbondata/core/index/dev/cgindex/CoarseGrainIndexFactory.java b/core/src/main/java/org/apache/carbondata/core/index/dev/cgindex/CoarseGrainIndexFactory.java
index 57f3d73..baf15c2 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/dev/cgindex/CoarseGrainIndexFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/dev/cgindex/CoarseGrainIndexFactory.java
@@ -27,9 +27,9 @@
 /**
  *  Factory for {@link CoarseGrainIndex}
  *  1. Any filter query which hits the table with index will call prune method of CGindex.
- *  2. The prune method of CGindex return list Blocklet , these blocklets contain the
+ *  2. The prune method of CGIndex returns a list of Blocklet; these blocklets contain the
  *     information of block and blocklet.
- *  3. Based on the splits scanrdd schedule the tasks.
+ *  3. Based on the splits, scanRdd schedules the tasks.
  */
 @InterfaceAudience.Developer("Index")
 @InterfaceStability.Evolving
diff --git a/core/src/main/java/org/apache/carbondata/core/index/dev/expr/AndIndexExprWrapper.java b/core/src/main/java/org/apache/carbondata/core/index/dev/expr/AndIndexExprWrapper.java
index 0dbdbd9..a29734a 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/dev/expr/AndIndexExprWrapper.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/dev/expr/AndIndexExprWrapper.java
@@ -126,7 +126,7 @@
   }
 
   @Override
-  public IndexExprWrapper getRightIndexWrapprt() {
+  public IndexExprWrapper getRightIndexWrapper() {
     return right;
   }
 }
diff --git a/core/src/main/java/org/apache/carbondata/core/index/dev/expr/IndexExprWrapper.java b/core/src/main/java/org/apache/carbondata/core/index/dev/expr/IndexExprWrapper.java
index 04867dd..02bb353 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/dev/expr/IndexExprWrapper.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/dev/expr/IndexExprWrapper.java
@@ -53,7 +53,7 @@
       List<PartitionSpec> partitionsToPrune) throws IOException;
 
   /**
-   * It is used in case on distributable index. First using job it gets all blockets from all
+   * It is used in case of distributable index. First, using a job, it gets all blocklets from all
    * related indexes. These blocklets are passed to this method to apply expression.
    *
    * @param blocklets
@@ -79,7 +79,7 @@
       throws IOException;
 
   /**
-   * Each leaf node is identified by uniqueid, so if user wants the underlying filter expression for
+   * Each leaf node is identified by uniqueId, so if the user wants the underlying filter expression for
    * any leaf node then this method can be used.
    * @param uniqueId
    * @return
@@ -99,7 +99,7 @@
   /**
    * get the right index wrapper
    */
-  public abstract IndexExprWrapper getRightIndexWrapprt();
+  public abstract IndexExprWrapper getRightIndexWrapper();
 
   /**
    * Convert segment to distributable object.
diff --git a/core/src/main/java/org/apache/carbondata/core/index/dev/expr/IndexExprWrapperImpl.java b/core/src/main/java/org/apache/carbondata/core/index/dev/expr/IndexExprWrapperImpl.java
index 16cc13b..62c79e6 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/dev/expr/IndexExprWrapperImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/dev/expr/IndexExprWrapperImpl.java
@@ -111,7 +111,7 @@
   }
 
   @Override
-  public IndexExprWrapper getRightIndexWrapprt() {
+  public IndexExprWrapper getRightIndexWrapper() {
     return null;
   }
 
diff --git a/core/src/main/java/org/apache/carbondata/core/index/dev/expr/IndexWrapperSimpleInfo.java b/core/src/main/java/org/apache/carbondata/core/index/dev/expr/IndexWrapperSimpleInfo.java
index 689028e..cbb923d 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/dev/expr/IndexWrapperSimpleInfo.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/dev/expr/IndexWrapperSimpleInfo.java
@@ -56,11 +56,11 @@
     } else if (indexExprWrapper instanceof AndIndexExprWrapper) {
       return new IndexWrapperSimpleInfo(WrapperType.AND,
           fromIndexWrapper(indexExprWrapper.getLeftIndexWrapper()),
-          fromIndexWrapper(indexExprWrapper.getRightIndexWrapprt()));
+          fromIndexWrapper(indexExprWrapper.getRightIndexWrapper()));
     } else {
       return new IndexWrapperSimpleInfo(WrapperType.OR,
           fromIndexWrapper(indexExprWrapper.getLeftIndexWrapper()),
-          fromIndexWrapper(indexExprWrapper.getRightIndexWrapprt()));
+          fromIndexWrapper(indexExprWrapper.getRightIndexWrapper()));
     }
   }
 
diff --git a/core/src/main/java/org/apache/carbondata/core/index/dev/expr/OrIndexExprWrapper.java b/core/src/main/java/org/apache/carbondata/core/index/dev/expr/OrIndexExprWrapper.java
index f419513..ec714de 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/dev/expr/OrIndexExprWrapper.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/dev/expr/OrIndexExprWrapper.java
@@ -119,7 +119,7 @@
   }
 
   @Override
-  public IndexExprWrapper getRightIndexWrapprt() {
+  public IndexExprWrapper getRightIndexWrapper() {
     return right;
   }
 }
diff --git a/core/src/main/java/org/apache/carbondata/core/index/dev/fgindex/FineGrainIndex.java b/core/src/main/java/org/apache/carbondata/core/index/dev/fgindex/FineGrainIndex.java
index a0b5cea..f98122e 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/dev/fgindex/FineGrainIndex.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/dev/fgindex/FineGrainIndex.java
@@ -28,7 +28,7 @@
 import org.apache.carbondata.core.indexstore.PartitionSpec;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 import org.apache.carbondata.core.scan.expression.Expression;
-import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.core.scan.filter.executer.FilterExecutor;
 
 /**
  * Index for Fine Grain level, see {@link org.apache.carbondata.core.index.IndexLevel#FG}
@@ -39,7 +39,7 @@
 
   @Override
   public List<FineGrainBlocklet> prune(Expression filter, SegmentProperties segmentProperties,
-      CarbonTable carbonTable, FilterExecuter filterExecuter) {
+      CarbonTable carbonTable, FilterExecutor filterExecutor) {
     throw new UnsupportedOperationException("Filter expression not supported");
   }
 
diff --git a/core/src/main/java/org/apache/carbondata/core/index/dev/fgindex/FineGrainIndexFactory.java b/core/src/main/java/org/apache/carbondata/core/index/dev/fgindex/FineGrainIndexFactory.java
index e7fd935..2b0674d 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/dev/fgindex/FineGrainIndexFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/dev/fgindex/FineGrainIndexFactory.java
@@ -27,13 +27,13 @@
 /**
  *  Factory for {@link FineGrainIndex}
  *
- *  1. Any filter query which hits the table with index will call prune method of FGindex.
- *  2. The prune method of FGindex return list FineGrainBlocklet , these blocklets contain the
- *     information of block, blocklet, page and rowids information as well.
- *  3. The pruned blocklets are internally wriitten to file and returns only the block ,
+ *  1. Any filter query which hits the table with index will call prune method of FGIndex.
+ *  2. The prune method of FGIndex returns a list of FineGrainBlocklet; these blocklets contain the
+ *     information of block, blocklet, page and rowIds as well.
+ *  3. The pruned blocklets are internally written to file, returning only the block,
  *    blocklet and filepath information as part of Splits.
- *  4. Based on the splits scanrdd schedule the tasks.
- *  5. In filterscanner we check the indexwriterpath from split and reNoteads the
+ *  4. Based on the splits, scanRdd schedules the tasks.
+ *  5. In the filter scanner we check the index writer path from the split and read the
  *     bitset if exists. And pass this bitset as input to it.
  */
 @InterfaceAudience.Developer("Index")
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDetailsFetcher.java b/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDetailsFetcher.java
index 03c137e..28d9769 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDetailsFetcher.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDetailsFetcher.java
@@ -31,7 +31,7 @@
 public interface BlockletDetailsFetcher {
 
   /**
-   * Get the blocklet detail information based on blockletid, blockid and segmentId.
+   * Get the blocklet detail information based on blockletId, blockId and segmentId.
    *
    * @param blocklets
    * @param segment
@@ -42,7 +42,7 @@
       throws IOException;
 
   /**
-   * Get the blocklet detail information based on blockletid, blockid and segmentId.
+   * Get the blocklet detail information based on blockletId, blockId and segmentId.
    *
    * @param blocklet
    * @param segment
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletIndexStore.java b/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletIndexStore.java
index e9231bf..7e17673 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletIndexStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletIndexStore.java
@@ -174,7 +174,7 @@
         new ArrayList<>(tableSegmentUniqueIdentifiers.size());
     List<TableBlockIndexUniqueIdentifierWrapper> missedIdentifiersWrapper = new ArrayList<>();
     BlockletIndexWrapper blockletIndexWrapper = null;
-    // Get the indexes for each indexfile from cache.
+    // Get the indexes for each index file from cache.
     try {
       for (TableBlockIndexUniqueIdentifierWrapper
                identifierWrapper : tableSegmentUniqueIdentifiers) {
@@ -278,7 +278,7 @@
   /**
    * Below method will be used to load the segment of segments
    * One segment may have multiple task , so  table segment will be loaded
-   * based on task id and will return the map of taksId to table segment
+   * based on task id and will return the map of taskId to table segment
    * map
    *
    * @return map of taks id to segment mapping
@@ -317,12 +317,12 @@
   private synchronized Object addAndGetSegmentLock(String uniqueIdentifier) {
     // get the segment lock object if it is present then return
     // otherwise add the new lock and return
-    Object segmentLoderLockObject = segmentLockMap.get(uniqueIdentifier);
-    if (null == segmentLoderLockObject) {
-      segmentLoderLockObject = new Object();
-      segmentLockMap.put(uniqueIdentifier, segmentLoderLockObject);
+    Object segmentLockObject = segmentLockMap.get(uniqueIdentifier);
+    if (null == segmentLockObject) {
+      segmentLockObject = new Object();
+      segmentLockMap.put(uniqueIdentifier, segmentLockObject);
     }
-    return segmentLoderLockObject;
+    return segmentLockObject;
   }
 
   /**
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/ExtendedBlocklet.java b/core/src/main/java/org/apache/carbondata/core/indexstore/ExtendedBlocklet.java
index 37aa60d..edb921b 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/ExtendedBlocklet.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/ExtendedBlocklet.java
@@ -158,9 +158,9 @@
   }
 
   /**
-   * Method to seralize extended blocklet and inputsplit for index server
+   * Method to serialize extended blocklet and input split for index server
    * DataFormat
-   * <Extended Blocklet data><Carbon input split serializeData lenght><CarbonInputSplitData>
+   * <Extended Blocklet data><Carbon input split serializeData length><CarbonInputSplitData>
    * @param out
    * @param uniqueLocation
    * @throws IOException
@@ -189,7 +189,7 @@
         inputSplit.setFilePath(null);
         inputSplit.setBucketId(null);
         if (inputSplit.isBlockCache()) {
-          inputSplit.updateFooteroffset();
+          inputSplit.updateFooterOffset();
           inputSplit.updateBlockLength();
           inputSplit.setWriteDetailInfo(false);
         }
@@ -201,7 +201,7 @@
   }
 
   /**
-   * Method to deseralize extended blocklet and inputsplit for index server
+   * Method to deserialize extended blocklet and input split for index server
    * @param in
    * @param locations
    * @param tablePath
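
The DataFormat comment above describes length-prefixed framing: the split payload is written after its own length, so a reader can deserialize or skip it independently of the blocklet part. A minimal sketch of that framing follows; the class and method names are hypothetical, not the actual CarbonData API.

```java
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Illustrative only: mirrors the <blocklet data><length><split data> framing.
public class LengthPrefixedFrame {
  static void write(DataOutputStream out, byte[] blockletData, byte[] splitData)
      throws IOException {
    out.write(blockletData);        // <Extended Blocklet data>
    out.writeInt(splitData.length); // <Carbon input split serializeData length>
    out.write(splitData);           // <CarbonInputSplitData>
  }

  static byte[] readSplit(DataInputStream in, int blockletLength) throws IOException {
    in.skipBytes(blockletLength);   // assume the blocklet part was parsed or skipped
    byte[] split = new byte[in.readInt()];
    in.readFully(split);
    return split;
  }
}
```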
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/ExtendedBlockletWrapper.java b/core/src/main/java/org/apache/carbondata/core/indexstore/ExtendedBlockletWrapper.java
index b9fb4b5..67d2eb8 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/ExtendedBlockletWrapper.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/ExtendedBlockletWrapper.java
@@ -84,7 +84,7 @@
         final CarbonFile carbonFile = FileFactory.getCarbonFile(folderPath);
         boolean isFolderExists = true;
         if (!carbonFile.isFileExist()) {
-          LOGGER.warn("Folder:" + folderPath + "doesn't exists, data will be send through netwrok");
+          LOGGER.warn("Folder:" + folderPath + "doesn't exists, data will be send through network");
           isFolderExists = false;
         }
         if (isFolderExists) {
@@ -164,7 +164,7 @@
   }
 
   /**
-   * deseralize the blocklet data from file or stream
+   * deserialize the blocklet data from file or stream
    * data format
    * <number of splits><number of unique location[short]><locations><serialize data len><data>
    *
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/ExtendedBlockletWrapperContainer.java b/core/src/main/java/org/apache/carbondata/core/indexstore/ExtendedBlockletWrapperContainer.java
index 19b0039..9816181 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/ExtendedBlockletWrapperContainer.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/ExtendedBlockletWrapperContainer.java
@@ -62,7 +62,7 @@
     this.isFallbackJob = isFallbackJob;
   }
 
-  public List<ExtendedBlocklet> getExtendedBlockets(String tablePath, String queryId,
+  public List<ExtendedBlocklet> getExtendedBlocklets(String tablePath, String queryId,
       boolean isCountJob) throws IOException {
     if (!isFallbackJob) {
       int numOfThreads = CarbonProperties.getNumOfThreadsForPruning();
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/TableBlockIndexUniqueIdentifier.java b/core/src/main/java/org/apache/carbondata/core/indexstore/TableBlockIndexUniqueIdentifier.java
index 0b2b3e3..7d422e7 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/TableBlockIndexUniqueIdentifier.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/TableBlockIndexUniqueIdentifier.java
@@ -23,7 +23,7 @@
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
 
 /**
- * Class holds the indexFile information to uniquely identitify the carbon index
+ * Class holds the indexFile information to uniquely identify the carbon index
  */
 public class TableBlockIndexUniqueIdentifier implements Serializable {
 
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/UnsafeMemoryDMStore.java b/core/src/main/java/org/apache/carbondata/core/indexstore/UnsafeMemoryDMStore.java
index 4fd0ebe..5e0f579 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/UnsafeMemoryDMStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/UnsafeMemoryDMStore.java
@@ -97,7 +97,7 @@
    * LO: Last Offset
    *
    * Read:
-   * FD: Read directly based of byte postion added in CarbonRowSchema
+   * FD: Read directly based on the byte position added in CarbonRowSchema
    *
    * VD: Read based on below logic
    * if not last variable column schema
@@ -144,7 +144,7 @@
     bytePosition += CarbonCommonConstants.INT_SIZE_IN_BYTE;
     // start byte position of variable length data
     int varColPosition = bytePosition + CarbonCommonConstants.INT_SIZE_IN_BYTE;
-    // current position refers to current byte postion in memory block
+    // current position refers to current byte position in memory block
     int currentPosition;
     for (int i = 0; i < schema.length; i++) {
       switch (schema[i].getSchemaType()) {
@@ -167,11 +167,11 @@
           break;
       }
     }
-    // writting the last offset
+    // writing the last offset
     getUnsafe()
         .putInt(memoryBlock.getBaseObject(), memoryBlock.getBaseOffset() + pointer + bytePosition,
             varColPosition);
-    // after adding last offset increament the length by 4 bytes as last postion
+    // after adding last offset increment the length by 4 bytes as last position
     // written as INT
     runningLength += CarbonCommonConstants.INT_SIZE_IN_BYTE;
     pointers[rowCount++] = pointer;
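
The FD/VD/LO scheme above can be hard to picture from the comments alone. Below is a minimal sketch of the same layout using a plain ByteBuffer in place of unsafe memory; the field values and buffer size are made up for illustration.

```java
import java.nio.ByteBuffer;

// Fixed-width fields (FD) sit at known byte positions; each variable field (VD)
// is preceded by an int offset, and a trailing int (LO) marks the end of the data
// so the last field's length can be derived as LO minus its offset.
public class RowLayoutSketch {
  public static void main(String[] args) {
    byte[] name = "blocklet".getBytes();
    ByteBuffer row = ByteBuffer.allocate(64);
    row.putInt(42);                       // FD: fixed int at position 0
    int offsetSlot = row.position();      // VD: reserve a slot for the data offset
    row.putInt(0);                        // placeholder offset
    int lastOffsetSlot = row.position();  // LO: reserve a slot for the last offset
    row.putInt(0);
    int dataStart = row.position();
    row.put(name);                        // variable-length payload
    row.putInt(offsetSlot, dataStart);    // patch the VD offset
    row.putInt(lastOffsetSlot, row.position()); // patch the LO
    int len = row.getInt(lastOffsetSlot) - row.getInt(offsetSlot);
    System.out.println(len == name.length); // true
  }
}
```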
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockIndex.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockIndex.java
index 0ad7940..6126636 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockIndex.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockIndex.java
@@ -58,7 +58,7 @@
 import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.filter.FilterExpressionProcessor;
 import org.apache.carbondata.core.scan.filter.FilterUtil;
-import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.core.scan.filter.executer.FilterExecutor;
 import org.apache.carbondata.core.scan.filter.executer.ImplicitColumnFilterExecutor;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 import org.apache.carbondata.core.util.BlockletIndexUtil;
@@ -133,7 +133,7 @@
     this.isPartitionTable = blockletIndexModel.getCarbonTable().isHivePartitionTable();
     if (this.isPartitionTable || !blockletIndexModel.getCarbonTable().isTransactionalTable() ||
         blockletIndexModel.getCarbonTable().isSupportFlatFolder() ||
-        // if the segment data is written in tablepath then no need to store whole path of file.
+        // if the segment data is written in table path then no need to store whole path of file.
         !blockletIndexModel.getFilePath().startsWith(
             blockletIndexModel.getCarbonTable().getTablePath())) {
       filePath = FilenameUtils.getFullPathNoEndSeparator(path)
@@ -444,7 +444,7 @@
     }
     // create the segment directory path
     String tablePath = segmentPropertiesWrapper.getTableIdentifier().getTablePath();
-    String segmentId = getTableTaskInfo(SUMMARY_SEGMENTID);
+    String segmentId = getTableTaskInfo(SUMMARY_SEGMENT_ID);
     return CarbonTablePath.getSegmentPath(tablePath, segmentId);
   }
 
@@ -460,7 +460,7 @@
     // write the task summary info to unsafe memory store
     if (null != summaryRow) {
       summaryRow.setByteArray(fileName, SUMMARY_INDEX_FILE_NAME);
-      summaryRow.setByteArray(segmentId, SUMMARY_SEGMENTID);
+      summaryRow.setByteArray(segmentId, SUMMARY_SEGMENT_ID);
       if (filePath.length > 0) {
         summaryRow.setByteArray(filePath, SUMMARY_INDEX_PATH);
       }
@@ -560,12 +560,12 @@
 
   @Override
   public boolean isScanRequired(FilterResolverIntf filterExp) {
-    FilterExecuter filterExecuter = FilterUtil.getFilterExecuterTree(
+    FilterExecutor filterExecutor = FilterUtil.getFilterExecutorTree(
         filterExp, getSegmentProperties(), null, getMinMaxCacheColumns(), false);
     IndexRow unsafeRow = taskSummaryDMStore
         .getIndexRow(getTaskSummarySchema(), taskSummaryDMStore.getRowCount() - 1);
     boolean isScanRequired = FilterExpressionProcessor
-        .isScanRequired(filterExecuter, getMinMaxValue(unsafeRow, TASK_MAX_VALUES_INDEX),
+        .isScanRequired(filterExecutor, getMinMaxValue(unsafeRow, TASK_MAX_VALUES_INDEX),
             getMinMaxValue(unsafeRow, TASK_MIN_VALUES_INDEX),
             getMinMaxFlag(unsafeRow, TASK_MIN_MAX_FLAG));
     if (isScanRequired) {
@@ -584,8 +584,8 @@
    */
   protected short getBlockletNumOfEntry(int index) {
     final byte[] bytes = getBlockletRowCountForEachBlock();
-    // if the segment data is written in tablepath
-    // then the reuslt of getBlockletRowCountForEachBlock will be empty.
+    // if the segment data is written in table path
+    // then the result of getBlockletRowCountForEachBlock will be empty.
     if (bytes.length == 0) {
       return 0;
     } else {
@@ -657,7 +657,7 @@
     return blockletToRowCountMap;
   }
 
-  private List<Blocklet> prune(FilterResolverIntf filterExp, FilterExecuter filterExecuter,
+  private List<Blocklet> prune(FilterResolverIntf filterExp, FilterExecutor filterExecutor,
       SegmentProperties segmentProperties) {
     if (memoryDMStore.getRowCount() == 0) {
       return new ArrayList<>();
@@ -685,8 +685,8 @@
       // flag to be used for deciding whether use min/max in executor pruning for BlockletIndex
       boolean useMinMaxForPruning = useMinMaxForExecutorPruning(filterExp);
       if (!validateSegmentProperties(segmentProperties)) {
-        filterExecuter = FilterUtil
-                .getFilterExecuterTree(filterExp, getSegmentProperties(),
+        filterExecutor = FilterUtil
+                .getFilterExecutorTree(filterExp, getSegmentProperties(),
                         null, getMinMaxCacheColumns(), false);
       }
       // min and max for executor pruning
@@ -696,7 +696,7 @@
         String fileName = getFileNameWithFilePath(row, filePath);
         short blockletId = getBlockletId(row);
         boolean isValid =
-            addBlockBasedOnMinMaxValue(filterExecuter, getMinMaxValue(row, MAX_VALUES_INDEX),
+            addBlockBasedOnMinMaxValue(filterExecutor, getMinMaxValue(row, MAX_VALUES_INDEX),
                 getMinMaxValue(row, MIN_VALUES_INDEX), minMaxFlag, fileName, blockletId);
         if (isValid) {
           blocklets.add(createBlocklet(row, fileName, blockletId, useMinMaxForPruning));
@@ -722,14 +722,14 @@
 
   @Override
   public List<Blocklet> prune(Expression expression, SegmentProperties properties,
-      CarbonTable carbonTable, FilterExecuter filterExecuter) {
+      CarbonTable carbonTable, FilterExecutor filterExecutor) {
     return prune(new IndexFilter(properties, carbonTable, expression).getResolver(), properties,
-        filterExecuter, carbonTable);
+        filterExecutor, carbonTable);
   }
 
   @Override
   public List<Blocklet> prune(FilterResolverIntf filterExp, SegmentProperties segmentProperties,
-      FilterExecuter filterExecuter, CarbonTable table) {
+      FilterExecutor filterExecutor, CarbonTable table) {
     if (memoryDMStore.getRowCount() == 0) {
       return new ArrayList<>();
     }
@@ -738,7 +738,7 @@
     // segmentProperties.
     // Its a temporary fix. The Interface Index.prune(FilterResolverIntf filterExp,
     // SegmentProperties segmentProperties, List<PartitionSpec> partitions) should be corrected
-    return prune(filterExp, filterExecuter, segmentProperties);
+    return prune(filterExp, filterExecutor, segmentProperties);
   }
 
   public boolean validatePartitionInfo(List<PartitionSpec> partitions) {
@@ -781,7 +781,7 @@
   /**
    * select the blocks based on column min and max value
    *
-   * @param filterExecuter
+   * @param filterExecutor
    * @param maxValue
    * @param minValue
    * @param minMaxFlag
@@ -789,10 +789,10 @@
    * @param blockletId
    * @return
    */
-  private boolean addBlockBasedOnMinMaxValue(FilterExecuter filterExecuter, byte[][] maxValue,
+  private boolean addBlockBasedOnMinMaxValue(FilterExecutor filterExecutor, byte[][] maxValue,
       byte[][] minValue, boolean[] minMaxFlag, String filePath, int blockletId) {
     BitSet bitSet = null;
-    if (filterExecuter instanceof ImplicitColumnFilterExecutor) {
+    if (filterExecutor instanceof ImplicitColumnFilterExecutor) {
       String uniqueBlockPath;
       CarbonTable carbonTable = segmentPropertiesWrapper.getCarbonTable();
       if (carbonTable.isHivePartitionTable()) {
@@ -810,10 +810,10 @@
       if (blockletId != -1) {
         uniqueBlockPath = uniqueBlockPath + CarbonCommonConstants.FILE_SEPARATOR + blockletId;
       }
-      bitSet = ((ImplicitColumnFilterExecutor) filterExecuter)
+      bitSet = ((ImplicitColumnFilterExecutor) filterExecutor)
           .isFilterValuesPresentInBlockOrBlocklet(maxValue, minValue, uniqueBlockPath, minMaxFlag);
     } else {
-      bitSet = filterExecuter.isScanRequired(maxValue, minValue, minMaxFlag);
+      bitSet = filterExecutor.isScanRequired(maxValue, minValue, minMaxFlag);
     }
     if (!bitSet.isEmpty()) {
       return true;
@@ -933,7 +933,7 @@
           CarbonCommonConstants.DEFAULT_CHARSET);
       fileDetails[1] = new String(unsafeRow.getByteArray(SUMMARY_INDEX_FILE_NAME),
           CarbonCommonConstants.DEFAULT_CHARSET);
-      fileDetails[2] = new String(unsafeRow.getByteArray(SUMMARY_SEGMENTID),
+      fileDetails[2] = new String(unsafeRow.getByteArray(SUMMARY_SEGMENT_ID),
           CarbonCommonConstants.DEFAULT_CHARSET);
       return fileDetails;
     } catch (Exception e) {
@@ -994,7 +994,7 @@
   }
 
   /**
-   * This method will ocnvert safe to unsafe memory DM store
+   * This method will convert safe to unsafe memory DM store
    *
    */
   public void convertToUnsafeDMStore() {
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletIndexFactory.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletIndexFactory.java
index ef665c5..f26f654 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletIndexFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletIndexFactory.java
@@ -63,7 +63,7 @@
 import org.apache.carbondata.core.metadata.schema.table.IndexSchema;
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
 import org.apache.carbondata.core.scan.filter.FilterUtil;
-import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.core.scan.filter.executer.FilterExecutor;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 import org.apache.carbondata.core.segmentmeta.SegmentColumnMetaDataInfo;
 import org.apache.carbondata.core.segmentmeta.SegmentMetaDataInfo;
@@ -220,7 +220,7 @@
   /**
    * Using blockLevel minmax values, identify if segment has to be added for further pruning and to
    * load segment index info to cache
-   * @param segment to be identified if needed for loading block indexs
+   * @param segment to be identified if needed for loading block indexes
    * @param segmentMetaDataInfo list of block level min max values
    * @param filter filter expression
    * @param identifiers tableBlockIndexUniqueIdentifiers
@@ -290,11 +290,11 @@
     FilterResolverIntf resolver =
         new IndexFilter(segmentProperties, this.getCarbonTable(), filter.getExpression())
             .getResolver();
-    // prepare filter executer using datmapFilter resolver
-    FilterExecuter filterExecuter =
-        FilterUtil.getFilterExecuterTree(resolver, segmentProperties, null, null, false);
+    // prepare filter executor using IndexFilter resolver
+    FilterExecutor filterExecutor =
+        FilterUtil.getFilterExecutorTree(resolver, segmentProperties, null, null, false);
     // check if block has to be pruned based on segment minmax
-    BitSet scanRequired = filterExecuter.isScanRequired(max, min, minMaxFlag);
+    BitSet scanRequired = filterExecutor.isScanRequired(max, min, minMaxFlag);
     if (!scanRequired.isEmpty()) {
       isScanRequired = true;
     }
@@ -374,7 +374,7 @@
   }
 
   /**
-   * Get the blocklet detail information based on blockletid, blockid and segmentId. This method is
+   * Get the blocklet detail information based on blockletId, blockId and segmentId. This method is
    * exclusively for BlockletIndexFactory as detail information is only available in this
    * default index.
    */
@@ -447,19 +447,19 @@
 
   @Override
   public List<IndexInputSplit> toDistributable(Segment segment) {
-    List<IndexInputSplit> distributables = new ArrayList<>();
+    List<IndexInputSplit> distributableList = new ArrayList<>();
     try {
       BlockletIndexInputSplit distributable = new BlockletIndexInputSplit();
       distributable.setSegment(segment);
       distributable.setIndexSchema(INDEX_SCHEMA);
       distributable.setSegmentPath(CarbonTablePath.getSegmentPath(identifier.getTablePath(),
           segment.getSegmentNo()));
-      distributables.add(new IndexInputSplitWrapper(UUID.randomUUID().toString(),
+      distributableList.add(new IndexInputSplitWrapper(UUID.randomUUID().toString(),
           distributable).getDistributable());
     } catch (Exception e) {
       throw new RuntimeException(e);
     }
-    return distributables;
+    return distributableList;
   }
 
   @Override
@@ -680,10 +680,10 @@
   }
 
   @Override
-  public List<IndexInputSplit> getAllUncachedDistributables(
-      List<IndexInputSplit> distributables) throws IOException {
-    List<IndexInputSplit> distributablesToBeLoaded = new ArrayList<>(distributables.size());
-    for (IndexInputSplit distributable : distributables) {
+  public List<IndexInputSplit> getAllUncached(
+      List<IndexInputSplit> distributableList) throws IOException {
+    List<IndexInputSplit> distributableToBeLoaded = new ArrayList<>(distributableList.size());
+    for (IndexInputSplit distributable : distributableList) {
       Segment segment = distributable.getSegment();
       Set<TableBlockIndexUniqueIdentifier> tableBlockIndexUniqueIdentifiers =
           getTableBlockIndexUniqueIdentifiers(segment);
@@ -695,10 +695,10 @@
           new TableBlockIndexUniqueIdentifierWrapper(validIdentifier, this.getCarbonTable()))) {
         ((BlockletIndexInputSplit) distributable)
             .setTableBlockIndexUniqueIdentifier(validIdentifier);
-        distributablesToBeLoaded.add(distributable);
+        distributableToBeLoaded.add(distributable);
       }
     }
-    return distributablesToBeLoaded;
+    return distributableToBeLoaded;
   }
 
   private Set<TableBlockIndexUniqueIdentifier> getTableSegmentUniqueIdentifiers(Segment segment)
@@ -719,9 +719,9 @@
   }
 
   @Override
-  public List<IndexInputSplit> getAllUncachedDistributables(List<Segment> validSegments,
+  public List<IndexInputSplit> getAllUncached(List<Segment> validSegments,
       IndexExprWrapper indexExprWrapper) throws IOException {
-    List<IndexInputSplit> distributablesToBeLoaded = new ArrayList<>();
+    List<IndexInputSplit> distributableToBeLoaded = new ArrayList<>();
     for (Segment segment : validSegments) {
       IndexInputSplitWrapper indexInputSplitWrappers =
           indexExprWrapper.toDistributableSegment(segment);
@@ -733,11 +733,11 @@
         if (identifier.getIndexFilePath() == null || blockletIndexWrapper == null) {
           ((BlockletIndexInputSplit) indexInputSplitWrappers.getDistributable())
               .setTableBlockIndexUniqueIdentifier(identifier);
-          distributablesToBeLoaded.add(indexInputSplitWrappers.getDistributable());
+          distributableToBeLoaded.add(indexInputSplitWrappers.getDistributable());
         }
       }
     }
-    return distributablesToBeLoaded;
+    return distributableToBeLoaded;
   }
 
   @Override
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletIndexRowIndexes.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletIndexRowIndexes.java
index 421870d..f3b9ce9 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletIndexRowIndexes.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletIndexRowIndexes.java
@@ -33,7 +33,7 @@
 
   int VERSION_INDEX = 4;
 
-  int SCHEMA_UPADATED_TIME_INDEX = 5;
+  int SCHEMA_UPDATED_TIME_INDEX = 5;
 
   int BLOCK_FOOTER_OFFSET = 6;
 
@@ -59,7 +59,7 @@
 
   int SUMMARY_INDEX_FILE_NAME = 3;
 
-  int SUMMARY_SEGMENTID = 4;
+  int SUMMARY_SEGMENT_ID = 4;
 
   int TASK_MIN_MAX_FLAG = 5;
 
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/SegmentIndexFileStore.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/SegmentIndexFileStore.java
index bca448d..8fbc7f6 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/SegmentIndexFileStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/SegmentIndexFileStore.java
@@ -66,12 +66,12 @@
   private static final Logger LOGGER =
       LogServiceFactory.getLogService(SegmentIndexFileStore.class.getName());
   /**
-   * Stores the indexfile name and related binary file data in it.
+   * Stores the index file name and related binary file data in it.
    */
   private Map<String, byte[]> carbonIndexMap;
 
   /**
-   * Stores the indexfile name and related binary file data in it.
+   * Stores the index file name and related binary file data in it.
    */
   private Map<String, byte[]> carbonIndexMapWithFullPath;
 
@@ -168,7 +168,7 @@
    * @param segmentPath
    * @throws IOException
    */
-  public void readAllIndexAndFillBolckletInfo(String segmentPath) throws IOException {
+  public void readAllIndexAndFillBlockletInfo(String segmentPath) throws IOException {
     CarbonFile[] carbonIndexFiles =
         getCarbonIndexFiles(segmentPath, FileFactory.getConfiguration());
     for (int i = 0; i < carbonIndexFiles.length; i++) {
@@ -290,7 +290,7 @@
   }
 
   /**
-   * Read carbonindex file and convert to stream and add to map
+   * Read carbon index file and convert to stream and add to map
    *
    * @param indexFile
    * @throws IOException
@@ -329,7 +329,7 @@
   }
 
   /**
-   * Get the carbonindex file content
+   * Get the carbon index file content
    *
    * @param fileName
    * @return
@@ -468,7 +468,7 @@
           blockIndexReplica
               .setBlock_index(CarbonMetadataUtil.getBlockletIndex(blockletInfo.getBlockletIndex()));
           blockIndexReplica
-              .setBlocklet_info(CarbonMetadataUtil.getBlocletInfo3(blockletInfo));
+              .setBlocklet_info(CarbonMetadataUtil.getBlockletInfo3(blockletInfo));
           blockIndexThrift.add(blockIndexReplica);
         }
       }
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/row/IndexRowImpl.java b/core/src/main/java/org/apache/carbondata/core/indexstore/row/IndexRowImpl.java
index 0cc8f18..0450d03 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/row/IndexRowImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/row/IndexRowImpl.java
@@ -41,7 +41,7 @@
 
   @Override
   public int getLengthInBytes(int ordinal) {
-    // if the segment data is written in tablepath
+    // if the segment data is written in table path
     // then the data[BlockletIndexRowIndexes.SUMMARY_INDEX_PATH] will be null.
     if (data[ordinal] == null) {
       return 0;
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/schema/SchemaGenerator.java b/core/src/main/java/org/apache/carbondata/core/indexstore/schema/SchemaGenerator.java
index 620d0fb..8527101 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/schema/SchemaGenerator.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/schema/SchemaGenerator.java
@@ -73,7 +73,7 @@
   private static void updateBytePosition(CarbonRowSchema[] schema) {
     int currentSize;
     int bytePosition = 0;
-    // First assign byte postion to all the fixed length schema
+    // First assign byte position to all the fixed length schema
     for (int i = 0; i < schema.length; i++) {
       switch (schema[i].getSchemaType()) {
         case STRUCT:
diff --git a/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/DateDirectDictionaryGenerator.java b/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/DateDirectDictionaryGenerator.java
index 6ef987f..2325f01 100644
--- a/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/DateDirectDictionaryGenerator.java
+++ b/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/DateDirectDictionaryGenerator.java
@@ -129,7 +129,7 @@
       }
       dateToStr = null;
     }
-    //adding +2 to reserve the first cuttOffDiff value for null or empty date
+    //adding +2 to reserve the first cutOffDiff value for null or empty date
     if (null == dateToStr) {
       return CarbonCommonConstants.DIRECT_DICT_VALUE_NULL;
     } else {
diff --git a/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampDirectDictionaryGenerator.java b/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampDirectDictionaryGenerator.java
index 3eb6f65..e1927e4 100644
--- a/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampDirectDictionaryGenerator.java
+++ b/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampDirectDictionaryGenerator.java
@@ -169,7 +169,7 @@
       }
       dateToStr = null;
     }
-    //adding +2 to reserve the first cuttOffDiff value for null or empty date
+    //adding +2 to reserve the first cutOffDiff value for null or empty date
     if (null == dateToStr) {
       return CarbonCommonConstants.DIRECT_DICT_VALUE_NULL;
     } else {
diff --git a/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampGranularityConstants.java b/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampGranularityConstants.java
index a624bab..2af61e9 100644
--- a/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampGranularityConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampGranularityConstants.java
@@ -29,7 +29,7 @@
    */
   String CARBON_CUTOFF_TIMESTAMP = "carbon.cutOffTimestamp";
   /**
-   * The property to set the timestamp (ie milis) conversion to the SECOND, MINUTE, HOUR
+   * The property to set the timestamp (i.e. milliseconds) conversion to the SECOND, MINUTE, HOUR
    * or DAY level
    */
   String CARBON_TIME_GRANULARITY = "carbon.timegranularity";
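
Both constants are ordinary CarbonProperties keys. A hedged usage sketch follows; the value formats below are assumptions for illustration, not documented defaults.

```java
import org.apache.carbondata.core.util.CarbonProperties;

public class GranularityConfigSketch {
  public static void main(String[] args) {
    // Assumed value formats; check the property documentation before relying on them.
    CarbonProperties properties = CarbonProperties.getInstance();
    properties.addProperty("carbon.cutOffTimestamp", "2000-01-01 00:00:00");
    properties.addProperty("carbon.timegranularity", "DAY");
  }
}
```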
diff --git a/core/src/main/java/org/apache/carbondata/core/keygenerator/mdkey/Bits.java b/core/src/main/java/org/apache/carbondata/core/keygenerator/mdkey/Bits.java
index 9ce585a..548292f 100644
--- a/core/src/main/java/org/apache/carbondata/core/keygenerator/mdkey/Bits.java
+++ b/core/src/main/java/org/apache/carbondata/core/keygenerator/mdkey/Bits.java
@@ -44,9 +44,9 @@
    */
   private int[] lens;
   /**
-   * wsize.
+   * word size.
    */
-  private int wsize;
+  private int wSize;
   /**
    * byteSize.
    */
@@ -56,11 +56,11 @@
     this.lens = lens;
     this.length = getTotalLength(lens);
 
-    wsize = length / LONG_LENGTH;
+    wSize = length / LONG_LENGTH;
     byteSize = length / 8;
 
     if (length % LONG_LENGTH != 0) {
-      wsize++;
+      wSize++;
     }
 
     if (length % 8 != 0) {
@@ -116,7 +116,7 @@
   }
 
   protected long[] get(long[] keys) {
-    long[] words = new long[wsize];
+    long[] words = new long[wSize];
     int ll = 0;
     int minLength = Math.min(lens.length, keys.length);
     for (int i = minLength - 1; i >= 0; i--) {
@@ -153,7 +153,7 @@
   }
 
   protected long[] get(int[] keys) {
-    long[] words = new long[wsize];
+    long[] words = new long[wSize];
     int ll = 0;
     int minLength = Math.min(lens.length, keys.length);
     for (int i = minLength - 1; i >= 0; i--) {
@@ -191,7 +191,7 @@
   }
 
   private long[] getArray(long[] words) {
-    long[] vals = new long[lens.length];
+    long[] values = new long[lens.length];
     int ll = 0;
     for (int i = lens.length - 1; i >= 0; i--) {
 
@@ -200,8 +200,8 @@
       long val = words[index];
       long mask = (LONG_MAX >>> (MAX_LENGTH - lens[i]));
       mask = mask << pos;
-      vals[i] = (val & mask);
-      vals[i] >>>= pos;
+      values[i] = (val & mask);
+      values[i] >>>= pos;
       ll += lens[i];
 
       int nextIndex = ll >> 6;
@@ -211,11 +211,11 @@
         if (pos != 0) {
           mask = (LONG_MAX >>> (MAX_LENGTH - pos));
           val = words[nextIndex];
-          vals[i] = vals[i] | ((val & mask) << (lens[i] - pos));
+          values[i] = values[i] | ((val & mask) << (lens[i] - pos));
         }
       }
     }
-    return vals;
+    return values;
   }
 
   public byte[] getBytes(long[] keys) {
@@ -255,7 +255,7 @@
 
     int length = 8;
     int ls = byteSize;
-    long[] words = new long[wsize];
+    long[] words = new long[wSize];
     for (int i = 0; i < words.length; i++) {
       long l = 0;
       ls -= 8;
@@ -281,7 +281,7 @@
 
     int length = 8;
     int ls = byteSize;
-    long[] words = new long[wsize];
+    long[] words = new long[wSize];
     for (int i = 0; i < words.length; i++) {
       long l = 0;
       ls -= 8;
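
A worked example of the sizing arithmetic above, assuming LONG_LENGTH is 64 as the long-word math suggests: for key lengths summing to 70 bits, 70 / 64 = 1 with a remainder, so wSize is bumped to 2 words, and 70 / 8 = 8 with a remainder, so byteSize is bumped to 9 bytes.

```java
public class BitsSizingSketch {
  public static void main(String[] args) {
    int length = 70;                 // total bits across all key lengths
    int wSize = length / 64;         // assuming LONG_LENGTH == 64
    if (length % 64 != 0) wSize++;   // -> 2 long words
    int byteSize = length / 8;
    if (length % 8 != 0) byteSize++; // -> 9 bytes
    System.out.println(wSize + " words, " + byteSize + " bytes");
  }
}
```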
diff --git a/core/src/main/java/org/apache/carbondata/core/localdictionary/dictionaryholder/MapBasedDictionaryStore.java b/core/src/main/java/org/apache/carbondata/core/localdictionary/dictionaryholder/MapBasedDictionaryStore.java
index 89a26fe..4581962 100644
--- a/core/src/main/java/org/apache/carbondata/core/localdictionary/dictionaryholder/MapBasedDictionaryStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/localdictionary/dictionaryholder/MapBasedDictionaryStore.java
@@ -43,7 +43,7 @@
 
   /**
    * maintaining array for reverse lookup
-   * otherwise iterating everytime in map for reverse lookup will be slowdown the performance
+   * otherwise iterating every time in the map for reverse lookup will slow down the performance
    * It will only maintain the reference
    */
   private DictionaryByteArrayWrapper[] referenceDictionaryArray;
@@ -64,7 +64,7 @@
   private boolean isThresholdReached;
 
   /**
-   * current datasize
+   * current data size
    */
   private long currentSize;
 
@@ -92,13 +92,13 @@
     Integer value = dictionary.get(key);
     // if value is null then dictionary is not present in store
     if (null == value) {
-      // aquire the lock
+      // acquire the lock
       synchronized (dictionary) {
         // check threshold
         checkIfThresholdReached();
         // get the value again as other thread might have added
         value = dictionary.get(key);
-        // double chekcing
+        // double checking
         if (null == value) {
           // increment the value
           value = ++lastAssignValue;
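
The acquire/double-check comments above describe classic double-checked locking for surrogate-key assignment. A minimal sketch, assuming a concurrent map so the unlocked first read is safe; this is simplified from the real store, which also tracks the current size against a threshold.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class DictionaryStoreSketch {
  private final Map<String, Integer> dictionary = new ConcurrentHashMap<>();
  private int lastAssignValue;

  public int getOrAssign(String key) {
    Integer value = dictionary.get(key);   // fast path, no lock
    if (value == null) {
      synchronized (dictionary) {
        value = dictionary.get(key);       // re-check: another thread may have won
        if (value == null) {
          value = ++lastAssignValue;       // assign the next surrogate value
          dictionary.put(key, value);
        }
      }
    }
    return value;
  }
}
```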
diff --git a/core/src/main/java/org/apache/carbondata/core/locks/AlluxioFileLock.java b/core/src/main/java/org/apache/carbondata/core/locks/AlluxioFileLock.java
index e871f53..a20c302 100644
--- a/core/src/main/java/org/apache/carbondata/core/locks/AlluxioFileLock.java
+++ b/core/src/main/java/org/apache/carbondata/core/locks/AlluxioFileLock.java
@@ -26,7 +26,7 @@
 
 /**
  * This class is used to handle the Alluxio File locking.
- * This is acheived using the concept of acquiring the data out stream using Append option.
+ * This is achieved by acquiring the data output stream with the Append option.
  */
 public class AlluxioFileLock extends HdfsFileLock {
 
diff --git a/core/src/main/java/org/apache/carbondata/core/locks/CarbonLockFactory.java b/core/src/main/java/org/apache/carbondata/core/locks/CarbonLockFactory.java
index 2330494..6f4bb1d 100644
--- a/core/src/main/java/org/apache/carbondata/core/locks/CarbonLockFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/locks/CarbonLockFactory.java
@@ -69,7 +69,7 @@
       absoluteLockPath = absoluteTableIdentifier.getTablePath();
     } else {
       absoluteLockPath =
-          getLockpath(absoluteTableIdentifier.getCarbonTableIdentifier().getTableId());
+          getLockPath(absoluteTableIdentifier.getCarbonTableIdentifier().getTableId());
     }
     FileFactory.FileType fileType = FileFactory.getFileType(absoluteLockPath);
     if (lockTypeConfigured.equals(CarbonCommonConstants.CARBON_LOCK_TYPE_CUSTOM)) {
@@ -103,7 +103,7 @@
     if (lockPath.isEmpty()) {
       lockFileLocation = locFileLocation;
     } else {
-      lockFileLocation = getLockpath("1");
+      lockFileLocation = getLockPath("1");
     }
     switch (lockTypeConfigured) {
       case CarbonCommonConstants.CARBON_LOCK_TYPE_CUSTOM:
@@ -139,7 +139,7 @@
     CarbonLockFactory.lockConstructor = getCustomLockConstructor(lockClassName);
   }
 
-  public static String getLockpath(String tableId) {
+  public static String getLockPath(String tableId) {
     return lockPath + CarbonCommonConstants.FILE_SEPARATOR + tableId;
   }
 
diff --git a/core/src/main/java/org/apache/carbondata/core/locks/CarbonLockUtil.java b/core/src/main/java/org/apache/carbondata/core/locks/CarbonLockUtil.java
index f940ddb..6b6e607 100644
--- a/core/src/main/java/org/apache/carbondata/core/locks/CarbonLockUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/locks/CarbonLockUtil.java
@@ -43,29 +43,29 @@
    *
    * @param carbonLock
    */
-  public static void fileUnlock(ICarbonLock carbonLock, String locktype) {
+  public static void fileUnlock(ICarbonLock carbonLock, String lockType) {
     if (carbonLock.unlock()) {
-      if (locktype.equals(LockUsage.METADATA_LOCK)) {
+      if (lockType.equals(LockUsage.METADATA_LOCK)) {
         LOGGER.info("Metadata lock has been successfully released");
-      } else if (locktype.equals(LockUsage.TABLE_STATUS_LOCK)) {
+      } else if (lockType.equals(LockUsage.TABLE_STATUS_LOCK)) {
         LOGGER.info("Table status lock has been successfully released");
-      } else if (locktype.equals(LockUsage.CLEAN_FILES_LOCK)) {
+      } else if (lockType.equals(LockUsage.CLEAN_FILES_LOCK)) {
         LOGGER.info("Clean files lock has been successfully released");
-      } else if (locktype.equals(LockUsage.DELETE_SEGMENT_LOCK)) {
+      } else if (lockType.equals(LockUsage.DELETE_SEGMENT_LOCK)) {
         LOGGER.info("Delete segments lock has been successfully released");
-      } else if (locktype.equals(LockUsage.INDEX_STATUS_LOCK)) {
+      } else if (lockType.equals(LockUsage.INDEX_STATUS_LOCK)) {
         LOGGER.info("Index status lock has been successfully released");
       }
     } else {
-      if (locktype.equals(LockUsage.METADATA_LOCK)) {
+      if (lockType.equals(LockUsage.METADATA_LOCK)) {
         LOGGER.error("Not able to release the metadata lock");
-      } else if (locktype.equals(LockUsage.TABLE_STATUS_LOCK)) {
+      } else if (lockType.equals(LockUsage.TABLE_STATUS_LOCK)) {
         LOGGER.error("Not able to release the table status lock");
-      } else if (locktype.equals(LockUsage.CLEAN_FILES_LOCK)) {
+      } else if (lockType.equals(LockUsage.CLEAN_FILES_LOCK)) {
         LOGGER.info("Not able to release the clean files lock");
-      } else if (locktype.equals(LockUsage.DELETE_SEGMENT_LOCK)) {
+      } else if (lockType.equals(LockUsage.DELETE_SEGMENT_LOCK)) {
         LOGGER.info("Not able to release the delete segments lock");
-      } else if (locktype.equals(LockUsage.INDEX_STATUS_LOCK)) {
+      } else if (lockType.equals(LockUsage.INDEX_STATUS_LOCK)) {
         LOGGER.info("Not able to release the index status lock");
       }
      }
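
A hedged usage sketch for the renamed parameter, assuming CarbonLockFactory.getCarbonLockObj and ICarbonLock.lockWithRetries keep their usual signatures, and with tableIdentifier standing in for a real AbsoluteTableIdentifier:

```java
ICarbonLock metadataLock =
    CarbonLockFactory.getCarbonLockObj(tableIdentifier, LockUsage.METADATA_LOCK);
try {
  if (metadataLock.lockWithRetries()) {
    // ... metadata operation guarded by the lock ...
  }
} finally {
  // logs success or failure per lock type, as shown above
  CarbonLockUtil.fileUnlock(metadataLock, LockUsage.METADATA_LOCK);
}
```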
@@ -130,7 +130,7 @@
       lockFilesDir = CarbonTablePath.getLockFilesDirPath(absoluteTableIdentifier.getTablePath());
     } else {
       lockFilesDir = CarbonTablePath.getLockFilesDirPath(
-          CarbonLockFactory.getLockpath(carbonTable.getTableInfo().getFactTable().getTableId()));
+          CarbonLockFactory.getLockPath(carbonTable.getTableInfo().getFactTable().getTableId()));
     }
     CarbonFile[] files = FileFactory.getCarbonFile(lockFilesDir)
         .listFiles(new CarbonFileFilter() {
diff --git a/core/src/main/java/org/apache/carbondata/core/locks/HdfsFileLock.java b/core/src/main/java/org/apache/carbondata/core/locks/HdfsFileLock.java
index ca1343e..95a3ffc 100644
--- a/core/src/main/java/org/apache/carbondata/core/locks/HdfsFileLock.java
+++ b/core/src/main/java/org/apache/carbondata/core/locks/HdfsFileLock.java
@@ -28,7 +28,7 @@
 
 /**
  * This class is used to handle the HDFS File locking.
- * This is achieved using the concept of acquiring the data out stream using Append option.
+ * This is achieved by acquiring the data output stream with the Append option.
  */
 public class HdfsFileLock extends AbstractCarbonLock {
 
diff --git a/core/src/main/java/org/apache/carbondata/core/locks/ICarbonLock.java b/core/src/main/java/org/apache/carbondata/core/locks/ICarbonLock.java
index ab20a5e..3b7ee5a 100644
--- a/core/src/main/java/org/apache/carbondata/core/locks/ICarbonLock.java
+++ b/core/src/main/java/org/apache/carbondata/core/locks/ICarbonLock.java
@@ -30,7 +30,7 @@
   boolean unlock();
 
   /**
-   * This will acquire the lock and if it doesnt get then it will retry after the confiured time.
+   * This will acquire the lock and if it doesn't get it, then it will retry after the configured time.
    *
    * @return
    */
diff --git a/core/src/main/java/org/apache/carbondata/core/locks/LocalFileLock.java b/core/src/main/java/org/apache/carbondata/core/locks/LocalFileLock.java
index 5f2b4c4..0720c97 100644
--- a/core/src/main/java/org/apache/carbondata/core/locks/LocalFileLock.java
+++ b/core/src/main/java/org/apache/carbondata/core/locks/LocalFileLock.java
@@ -33,7 +33,7 @@
 
 /**
  * This class handles the file locking in the local file system.
- * This will be handled using the file channel lock API.
+ * This will be handled by using the file channel lock API.
  */
 public class LocalFileLock extends AbstractCarbonLock {
   /**
diff --git a/core/src/main/java/org/apache/carbondata/core/locks/LockUsage.java b/core/src/main/java/org/apache/carbondata/core/locks/LockUsage.java
index e032b53..8098bc6 100644
--- a/core/src/main/java/org/apache/carbondata/core/locks/LockUsage.java
+++ b/core/src/main/java/org/apache/carbondata/core/locks/LockUsage.java
@@ -18,7 +18,7 @@
 package org.apache.carbondata.core.locks;
 
 /**
- * This enum is used to define the usecase of the lock.
+ * This class is used to define the use case of the lock.
  * Each enum value is one specific lock case.
  */
 public class LockUsage {
@@ -26,7 +26,7 @@
   public static final String METADATA_LOCK = "meta.lock";
   public static final String COMPACTION_LOCK = "compaction.lock";
   public static final String HANDOFF_LOCK = "handoff.lock";
-  public static final String SYSTEMLEVEL_COMPACTION_LOCK = "system_level_compaction.lock";
+  public static final String SYSTEM_LEVEL_COMPACTION_LOCK = "system_level_compaction.lock";
   public static final String ALTER_PARTITION_LOCK = "alter_partition.lock";
   public static final String TABLE_STATUS_LOCK = "tablestatus.lock";
   public static final String TABLE_UPDATE_STATUS_LOCK = "tableupdatestatus.lock";
diff --git a/core/src/main/java/org/apache/carbondata/core/locks/S3FileLock.java b/core/src/main/java/org/apache/carbondata/core/locks/S3FileLock.java
index 1b8688c..48e19c4 100644
--- a/core/src/main/java/org/apache/carbondata/core/locks/S3FileLock.java
+++ b/core/src/main/java/org/apache/carbondata/core/locks/S3FileLock.java
@@ -29,7 +29,7 @@
 
 /**
  * This class is used to handle the S3 File locking.
- * This is acheived using the concept of acquiring the data out stream using Append option.
+ * This is achieved by acquiring the data output stream with the Append option.
  */
 public class S3FileLock extends AbstractCarbonLock {
 
diff --git a/core/src/main/java/org/apache/carbondata/core/locks/ZooKeeperLocking.java b/core/src/main/java/org/apache/carbondata/core/locks/ZooKeeperLocking.java
index 0dc8bbf..844f0f8 100644
--- a/core/src/main/java/org/apache/carbondata/core/locks/ZooKeeperLocking.java
+++ b/core/src/main/java/org/apache/carbondata/core/locks/ZooKeeperLocking.java
@@ -99,7 +99,7 @@
       createBaseNode();
       // if exists returns null then path doesnt exist. so creating.
       if (null == zk.exists(this.tableIdFolder, true)) {
-        createRecursivly(this.tableIdFolder);
+        createRecursively(this.tableIdFolder);
       }
       // if exists returns null then path doesnt exist. so creating.
       if (null == zk.exists(this.lockTypeFolder, true)) {
@@ -122,15 +122,15 @@
   }
 
   /**
-   * Create zookeepr node if not exist
+   * Create a zookeeper node if it does not exist
    * @param path
    * @throws KeeperException
    * @throws InterruptedException
    */
-  private void createRecursivly(String path) throws KeeperException, InterruptedException {
+  private void createRecursively(String path) throws KeeperException, InterruptedException {
     if (zk.exists(path, true) == null && path.length() > 0) {
       String temp = path.substring(0, path.lastIndexOf(CarbonCommonConstants.FILE_SEPARATOR));
-      createRecursivly(temp);
+      createRecursively(temp);
       zk.create(path, null, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
     }
   }
@@ -149,7 +149,7 @@
       // get the children present in zooKeeperLocation.
       List<String> nodes = zk.getChildren(this.lockTypeFolder, null);
 
-      // sort the childrens
+      // sort the children
       Collections.sort(nodes);
 
       // here the logic is , for each lock request zookeeper will create a file ending with
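
The sorted-children scheme hinted at in the comment above is the classic ZooKeeper sequential-node lock. A hedged sketch of the acquisition step, using illustrative names rather than the actual ZooKeeperLocking fields:

import java.util.Collections;
import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooKeeper;

final class ZkSequentialLockSketch {
  // Each lock request creates an ephemeral sequential child node; the client
  // owning the smallest sequence number holds the lock.
  static boolean tryAcquire(ZooKeeper zk, String lockFolder)
      throws KeeperException, InterruptedException {
    String myNode = zk.create(lockFolder + "/lock-", null,
        Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
    List<String> children = zk.getChildren(lockFolder, false);
    Collections.sort(children);
    // We hold the lock only if our node sorts first among the children.
    return myNode.endsWith(children.get(0));
  }
}
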
diff --git a/core/src/main/java/org/apache/carbondata/core/memory/CarbonUnsafe.java b/core/src/main/java/org/apache/carbondata/core/memory/CarbonUnsafe.java
index ecadd1e..1743595 100644
--- a/core/src/main/java/org/apache/carbondata/core/memory/CarbonUnsafe.java
+++ b/core/src/main/java/org/apache/carbondata/core/memory/CarbonUnsafe.java
@@ -36,7 +36,7 @@
 
   public static final int FLOAT_ARRAY_OFFSET;
 
-  public static final boolean ISLITTLEENDIAN =
+  public static final boolean IS_LITTLE_ENDIAN =
       ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN);
 
   private static Unsafe unsafe;
diff --git a/core/src/main/java/org/apache/carbondata/core/memory/MemoryBlock.java b/core/src/main/java/org/apache/carbondata/core/memory/MemoryBlock.java
index 563521a..3c7f1b0 100644
--- a/core/src/main/java/org/apache/carbondata/core/memory/MemoryBlock.java
+++ b/core/src/main/java/org/apache/carbondata/core/memory/MemoryBlock.java
@@ -33,7 +33,7 @@
   private boolean isFreed;
 
   /**
-   * Whether it is offheap or onheap memory type
+   * Whether it is off-heap or on-heap memory type
    */
   private MemoryType memoryType;
 
diff --git a/core/src/main/java/org/apache/carbondata/core/memory/UnsafeMemoryManager.java b/core/src/main/java/org/apache/carbondata/core/memory/UnsafeMemoryManager.java
index 466c023..22f2214 100644
--- a/core/src/main/java/org/apache/carbondata/core/memory/UnsafeMemoryManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/memory/UnsafeMemoryManager.java
@@ -44,7 +44,7 @@
   private static boolean offHeap = Boolean.parseBoolean(CarbonProperties.getInstance()
       .getProperty(CarbonCommonConstants.ENABLE_OFFHEAP_SORT,
           CarbonCommonConstants.ENABLE_OFFHEAP_SORT_DEFAULT));
-  private static Map<String, Set<MemoryBlock>> taskIdToOffheapMemoryBlockMap;
+  private static Map<String, Set<MemoryBlock>> taskIdToOffHeapMemoryBlockMap;
   static {
     long size = 0L;
     String configuredWorkingMemorySize = null;
@@ -71,7 +71,7 @@
         }
       }
     } catch (Exception e) {
-      LOGGER.info("Invalid offheap working memory size value: " + configuredWorkingMemorySize);
+      LOGGER.info("Invalid off-heap working memory size value: " + configuredWorkingMemorySize);
     }
     long takenSize = size;
     MemoryType memoryType;
@@ -81,7 +81,7 @@
       if (takenSize < defaultSize) {
         takenSize = defaultSize;
         LOGGER.warn(String.format(
-            "It is not recommended to set offheap working memory size less than %sMB,"
+            "It is not recommended to set off-heap working memory size less than %sMB,"
                 + " so setting default value to %d",
             CarbonCommonConstants.UNSAFE_WORKING_MEMORY_IN_MB_DEFAULT, defaultSize));
       }
@@ -92,7 +92,7 @@
       memoryType = MemoryType.ONHEAP;
     }
     INSTANCE = new UnsafeMemoryManager(takenSize, memoryType);
-    taskIdToOffheapMemoryBlockMap = new HashMap<>();
+    taskIdToOffHeapMemoryBlockMap = new HashMap<>();
   }
 
   public static final UnsafeMemoryManager INSTANCE;
@@ -106,7 +106,7 @@
   private UnsafeMemoryManager(long totalMemory, MemoryType memoryType) {
     this.totalMemory = totalMemory;
     this.memoryType = memoryType;
-    LOGGER.info("Offheap Working Memory manager is created with size " + totalMemory + " with "
+    LOGGER.info("Off-heap working memory manager is created with size " + totalMemory + " with "
         + memoryType);
   }
 
@@ -116,14 +116,14 @@
     if (memoryUsed + memoryRequested <= totalMemory && memoryType == MemoryType.OFFHEAP) {
       memoryBlock = MemoryAllocator.UNSAFE.allocate(memoryRequested);
       memoryUsed += memoryBlock.size();
-      Set<MemoryBlock> listOfMemoryBlock = taskIdToOffheapMemoryBlockMap.get(taskId);
+      Set<MemoryBlock> listOfMemoryBlock = taskIdToOffHeapMemoryBlockMap.get(taskId);
       if (null == listOfMemoryBlock) {
         listOfMemoryBlock = new HashSet<>();
-        taskIdToOffheapMemoryBlockMap.put(taskId, listOfMemoryBlock);
+        taskIdToOffHeapMemoryBlockMap.put(taskId, listOfMemoryBlock);
       }
       listOfMemoryBlock.add(memoryBlock);
       if (LOGGER.isDebugEnabled()) {
-        LOGGER.debug(String.format("Creating Offheap working Memory block (%s) with size %d."
+        LOGGER.debug(String.format("Creating off-heap working memory block (%s) with size %d."
                 + " Total memory used %d Bytes, left %d Bytes.",
             memoryBlock.toString(), memoryBlock.size(), memoryUsed, totalMemory - memoryUsed));
       }
@@ -132,22 +132,22 @@
       memoryBlock = MemoryAllocator.HEAP.allocate(memoryRequested);
       if (LOGGER.isDebugEnabled()) {
         LOGGER.debug(String
-            .format("Creating onheap working Memory block with size: (%d)", memoryBlock.size()));
+            .format("Creating on-heap working memory block with size: (%d)", memoryBlock.size()));
       }
     }
     return memoryBlock;
   }
 
   public synchronized void freeMemory(String taskId, MemoryBlock memoryBlock) {
-    if (taskIdToOffheapMemoryBlockMap.containsKey(taskId)) {
-      taskIdToOffheapMemoryBlockMap.get(taskId).remove(memoryBlock);
+    if (taskIdToOffHeapMemoryBlockMap.containsKey(taskId)) {
+      taskIdToOffHeapMemoryBlockMap.get(taskId).remove(memoryBlock);
     }
     if (!memoryBlock.isFreedStatus()) {
       getMemoryAllocator(memoryBlock.getMemoryType()).free(memoryBlock);
       memoryUsed -= memoryBlock.size();
       memoryUsed = memoryUsed < 0 ? 0 : memoryUsed;
       if (LOGGER.isDebugEnabled() && memoryBlock.getMemoryType() == MemoryType.OFFHEAP) {
-        LOGGER.debug(String.format("Freeing offheap working memory block (%s) with size: %d, "
+        LOGGER.debug(String.format("Freeing off-heap working memory block (%s) with size: %d, "
                 + "current available memory is: %d", memoryBlock.toString(), memoryBlock.size(),
             totalMemory - memoryUsed));
       }
@@ -156,29 +156,29 @@
 
   public synchronized void freeMemoryAll(String taskId) {
     Set<MemoryBlock> memoryBlockSet;
-    memoryBlockSet = taskIdToOffheapMemoryBlockMap.remove(taskId);
-    long occuppiedMemory = 0;
+    memoryBlockSet = taskIdToOffHeapMemoryBlockMap.remove(taskId);
+    long occupiedMemory = 0;
     if (null != memoryBlockSet) {
       Iterator<MemoryBlock> iterator = memoryBlockSet.iterator();
       MemoryBlock memoryBlock = null;
       while (iterator.hasNext()) {
         memoryBlock = iterator.next();
         if (!memoryBlock.isFreedStatus()) {
-          occuppiedMemory += memoryBlock.size();
+          occupiedMemory += memoryBlock.size();
           getMemoryAllocator(memoryBlock.getMemoryType()).free(memoryBlock);
         }
       }
     }
-    memoryUsed -= occuppiedMemory;
+    memoryUsed -= occupiedMemory;
     memoryUsed = memoryUsed < 0 ? 0 : memoryUsed;
     if (LOGGER.isDebugEnabled()) {
       LOGGER.debug(String.format(
-          "Freeing offheap working memory of size %d. Current available memory is %d",
-          occuppiedMemory, totalMemory - memoryUsed));
+          "Freeing off-heap working memory of size %d. Current available memory is %d",
+          occupiedMemory, totalMemory - memoryUsed));
     }
     LOGGER.info(String.format(
-        "Total offheap working memory used after task %s is %d. Current running tasks are %s",
-        taskId, memoryUsed, StringUtils.join(taskIdToOffheapMemoryBlockMap.keySet(), ", ")));
+        "Total off-heap working memory used after task %s is %d. Current running tasks are %s",
+        taskId, memoryUsed, StringUtils.join(taskIdToOffHeapMemoryBlockMap.keySet(), ", ")));
   }
 
   public long getUsableMemory() {
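
As a side note, the get/null-check/put sequence used in allocate() to maintain taskIdToOffHeapMemoryBlockMap is the standard map idiom that computeIfAbsent expresses in one call on Java 8+. A minimal self-contained sketch, with Object standing in for MemoryBlock:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

final class TaskMemoryMapSketch {
  private final Map<String, Set<Object>> taskIdToBlocks = new HashMap<>();

  // Same effect as: get the set, create and put it if absent, then add.
  void track(String taskId, Object memoryBlock) {
    taskIdToBlocks.computeIfAbsent(taskId, k -> new HashSet<>()).add(memoryBlock);
  }
}
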
diff --git a/core/src/main/java/org/apache/carbondata/core/memory/UnsafeSortMemoryManager.java b/core/src/main/java/org/apache/carbondata/core/memory/UnsafeSortMemoryManager.java
index 07e1a55..68b2354 100644
--- a/core/src/main/java/org/apache/carbondata/core/memory/UnsafeSortMemoryManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/memory/UnsafeSortMemoryManager.java
@@ -43,14 +43,14 @@
       LogServiceFactory.getLogService(UnsafeSortMemoryManager.class.getName());
 
   /**
-   * offheap is enabled
+   * off-heap is enabled
    */
   private static boolean offHeap = Boolean.parseBoolean(CarbonProperties.getInstance()
       .getProperty(CarbonCommonConstants.ENABLE_OFFHEAP_SORT,
           CarbonCommonConstants.ENABLE_OFFHEAP_SORT_DEFAULT));
 
   /**
-   * map to keep taskid to memory blocks
+   * map to keep task id to memory blocks
    */
   private static Map<String, Set<MemoryBlock>> taskIdToMemoryBlockMap;
 
@@ -145,24 +145,24 @@
   public synchronized void freeMemoryAll(String taskId) {
     Set<MemoryBlock> memoryBlockSet = null;
     memoryBlockSet = taskIdToMemoryBlockMap.remove(taskId);
-    long occuppiedMemory = 0;
+    long occupiedMemory = 0;
     if (null != memoryBlockSet) {
       Iterator<MemoryBlock> iterator = memoryBlockSet.iterator();
       MemoryBlock memoryBlock = null;
       while (iterator.hasNext()) {
         memoryBlock = iterator.next();
         if (!memoryBlock.isFreedStatus()) {
-          occuppiedMemory += memoryBlock.size();
+          occupiedMemory += memoryBlock.size();
           allocator.free(memoryBlock);
         }
       }
     }
-    memoryUsed -= occuppiedMemory;
+    memoryUsed -= occupiedMemory;
     memoryUsed = memoryUsed < 0 ? 0 : memoryUsed;
     if (LOGGER.isDebugEnabled()) {
       LOGGER.debug(
           String.format("Freeing sort memory of size: %d, current available memory is: %d",
-              occuppiedMemory, totalMemory - memoryUsed));
+              occupiedMemory, totalMemory - memoryUsed));
     }
     LOGGER.info(String.format(
         "Total sort memory used after task %s is %d. Current running tasks are: %s",
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/CarbonMetadata.java b/core/src/main/java/org/apache/carbondata/core/metadata/CarbonMetadata.java
index f25496f..7c7382b 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/CarbonMetadata.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/CarbonMetadata.java
@@ -34,7 +34,7 @@
   /**
    * meta data instance
    */
-  private static final CarbonMetadata CARBONMETADATAINSTANCE = new CarbonMetadata();
+  private static final CarbonMetadata INSTANCE = new CarbonMetadata();
 
   /**
    * holds the list of tableInfo currently present
@@ -47,16 +47,16 @@
   }
 
   public static CarbonMetadata getInstance() {
-    return CARBONMETADATAINSTANCE;
+    return INSTANCE;
   }
 
   /**
    * removed the table information
    *
-   * @param tableUniquName
+   * @param tableUniqueName
    */
-  public void removeTable(String tableUniquName) {
-    tableInfoMap.remove(convertToLowerCase(tableUniquName));
+  public void removeTable(String tableUniqueName) {
+    tableInfoMap.remove(convertToLowerCase(tableUniqueName));
   }
 
   /**
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/CarbonTableIdentifier.java b/core/src/main/java/org/apache/carbondata/core/metadata/CarbonTableIdentifier.java
index ca8b31e..a702899 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/CarbonTableIdentifier.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/CarbonTableIdentifier.java
@@ -87,7 +87,7 @@
   }
 
   /**
-   *Creates the key for bad record lgger.
+   * Creates the key for bad record logger.
    */
   public String getBadRecordLoggerKey() {
     return databaseName + File.separator + tableName + '_' + tableId;
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java b/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
index 545d5b6..917d68a 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
@@ -98,12 +98,12 @@
   }
 
   /**
-   * Write segment information to the segment folder with indexfilename and
+   * Write segment information to the segment folder with index file name and
    * corresponding partitions.
    */
   public static void writeSegmentFile(String tablePath, final String taskNo, String location,
-      String timeStamp, List<String> partionNames) throws IOException {
-    writeSegmentFile(tablePath, taskNo, location, timeStamp, partionNames, false);
+      String timeStamp, List<String> partitionNames) throws IOException {
+    writeSegmentFile(tablePath, taskNo, location, timeStamp, partitionNames, false);
   }
 
   /**
@@ -157,11 +157,11 @@
   }
 
   /**
-   * Write segment information to the segment folder with indexfilename and
+   * Write segment information to the segment folder with index file name and
    * corresponding partitions.
    */
   public static void writeSegmentFile(String tablePath, final String taskNo, String location,
-      String timeStamp, List<String> partionNames, boolean isMergeIndexFlow) throws IOException {
+      String timeStamp, List<String> partitionNames, boolean isMergeIndexFlow) throws IOException {
     String tempFolderLoc = timeStamp + ".tmp";
     String writePath = CarbonTablePath.getSegmentFilesLocation(tablePath) + "/" + tempFolderLoc;
     CarbonFile carbonFile = FileFactory.getCarbonFile(writePath);
@@ -176,8 +176,8 @@
           .getCarbonFile(location + CarbonCommonConstants.FILE_SEPARATOR + tempFolderLoc);
     }
 
-    if ((tempFolder.exists() && partionNames.size() > 0) || (isMergeIndexFlow
-        && partionNames.size() > 0)) {
+    if ((tempFolder.exists() && partitionNames.size() > 0) || (isMergeIndexFlow
+        && partitionNames.size() > 0)) {
       CarbonFile[] carbonFiles = tempFolder.listFiles(new CarbonFileFilter() {
         @Override
         public boolean accept(CarbonFile file) {
@@ -195,7 +195,7 @@
         SegmentFile segmentFile = new SegmentFile();
         FolderDetails folderDetails = new FolderDetails();
         folderDetails.setRelative(isRelative);
-        folderDetails.setPartitions(partionNames);
+        folderDetails.setPartitions(partitionNames);
         folderDetails.setStatus(SegmentStatus.SUCCESS.getMessage());
         for (CarbonFile file : carbonFiles) {
           if (file.getName().endsWith(CarbonTablePath.MERGE_INDEX_FILE_EXT)) {
@@ -208,7 +208,7 @@
         String path = null;
         if (isMergeIndexFlow) {
           // in case of merge index flow, tasks are launched per partition and all the tasks
-          // will be writting to the same tmp folder, in that case taskNo is not unique.
+          // will be writing to the same tmp folder, in that case taskNo is not unique.
           // To generate a unique fileName UUID is used
           path = writePath + "/" + CarbonUtil.generateUUID() + CarbonTablePath.SEGMENT_EXT;
         } else {
@@ -588,7 +588,7 @@
             CarbonCommonConstants.MAX_TIMEOUT_FOR_CONCURRENT_LOCK_DEFAULT);
     try {
       if (carbonLock.lockWithRetries(retryCount, maxTimeout)) {
-        LOGGER.info("Acquired lock for tablepath" + tablePath + " for table status updation");
+        LOGGER.info("Acquired lock for table path " + tablePath + " for table status update");
         LoadMetadataDetails[] listOfLoadFolderDetailsArray =
             SegmentStatusManager.readLoadMetadata(metadataPath);
 
@@ -622,15 +622,15 @@
         status = true;
       } else {
         LOGGER.error(
-            "Not able to acquire the lock for Table status updation for table path " + tablePath);
+            "Not able to acquire the lock for Table status update for table path " + tablePath);
       }
 
     } finally {
       if (carbonLock.unlock()) {
-        LOGGER.info("Table unlocked successfully after table status updation" + tablePath);
+        LOGGER.info("Table unlocked successfully after table status update " + tablePath);
       } else {
         LOGGER.error(
-            "Unable to unlock Table lock for table" + tablePath + " during table status updation");
+            "Unable to unlock Table lock for table " + tablePath + " during table status update");
       }
     }
     return status;
@@ -803,7 +803,7 @@
     for (Map.Entry<String, byte[]> entry : carbonIndexMap.entrySet()) {
       List<DataFileFooter> indexInfo =
           fileFooterConverter.getIndexInfo(entry.getKey(), entry.getValue());
-      // carbonindex file stores blocklets so block filename will be duplicated, use set to remove
+      // carbon index file stores blocklets so block filename will be duplicated, use set to remove
       // duplicates
       Set<String> blocks = new LinkedHashSet<>();
       for (DataFileFooter footer : indexInfo) {
@@ -886,7 +886,7 @@
         String location = entry.getKey();
         if (entry.getValue().isRelative) {
           if (location.equals("/")) {
-            // incase of flat folder, the relative segment location is '/',
+            // in case of flat folder, the relative segment location is '/',
             // so don't append it as we again add file separator for file names.
             location = tablePath;
           } else {
@@ -980,7 +980,7 @@
               + CarbonTablePath.SEGMENT_EXT;
       writeSegmentFile(segmentFile, writePath);
     }
-    // Check whether we can completly remove the segment.
+    // Check whether we can completely remove the segment.
     boolean deleteSegment = true;
     for (Map.Entry<String, FolderDetails> entry : segmentFile.getLocationMap().entrySet()) {
       if (entry.getValue().getStatus().equals(SegmentStatus.SUCCESS.getMessage())) {
@@ -1055,7 +1055,7 @@
         }
         for (Map.Entry<String, List<String>> entry : fileStore.indexFilesMap.entrySet()) {
           String indexFile = entry.getKey();
-          // Check the partition information in the partiton mapper
+          // Check the partition information in the partition mapper
           Long fileTimestamp = CarbonUpdateUtil.getTimeStampAsLong(indexFile
               .substring(indexFile.lastIndexOf(CarbonCommonConstants.HYPHEN) + 1,
                   indexFile.length() - CarbonTablePath.INDEX_FILE_EXT.length()));
@@ -1131,15 +1131,15 @@
    */
   private static void deletePhysicalPartition(List<PartitionSpec> partitionSpecs,
       Map<String, List<String>> locationMap, List<String> indexOrMergeFiles, String tablePath) {
-    for (String indexOrMergFile : indexOrMergeFiles) {
+    for (String indexOrMergeFile : indexOrMergeFiles) {
       if (null != partitionSpecs) {
-        Path location = new Path(indexOrMergFile);
+        Path location = new Path(indexOrMergeFile);
         boolean exists = pathExistsInPartitionSpec(partitionSpecs, location);
         if (!exists) {
           FileFactory.deleteAllCarbonFilesOfDir(FileFactory.getCarbonFile(location.toString()));
         }
       } else {
-        Path location = new Path(indexOrMergFile);
+        Path location = new Path(indexOrMergeFile);
         FileFactory.deleteAllCarbonFilesOfDir(FileFactory.getCarbonFile(location.toString()));
       }
     }
@@ -1300,7 +1300,7 @@
   }
 
   /**
-   * Returs the current partition specs of this segment
+   * Returns the current partition specs of this segment
    * @return
    */
   public List<PartitionSpec> getPartitionSpecs() {
@@ -1320,7 +1320,7 @@
   }
 
   /**
-   * This method returns the list of indx/merge index files for a segment in carbonTable.
+   * This method returns the list of index/merge index files for a segment in carbonTable.
    */
   public static Set<String> getIndexFilesListForSegment(Segment segment, String tablePath)
       throws IOException {
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/blocklet/BlockletInfo.java b/core/src/main/java/org/apache/carbondata/core/metadata/blocklet/BlockletInfo.java
index 63c7040..ab74e4d 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/blocklet/BlockletInfo.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/blocklet/BlockletInfo.java
@@ -164,20 +164,20 @@
   public void write(DataOutput output) throws IOException {
     output.writeLong(dimensionOffset);
     output.writeLong(measureOffsets);
-    int dsize = dimensionChunkOffsets != null ? dimensionChunkOffsets.size() : 0;
-    output.writeShort(dsize);
-    for (int i = 0; i < dsize; i++) {
+    int dimensionSize = dimensionChunkOffsets != null ? dimensionChunkOffsets.size() : 0;
+    output.writeShort(dimensionSize);
+    for (int i = 0; i < dimensionSize; i++) {
       output.writeLong(dimensionChunkOffsets.get(i));
     }
-    for (int i = 0; i < dsize; i++) {
+    for (int i = 0; i < dimensionSize; i++) {
       output.writeInt(dimensionChunksLength.get(i));
     }
-    int mSize = measureChunkOffsets != null ? measureChunkOffsets.size() : 0;
-    output.writeShort(mSize);
-    for (int i = 0; i < mSize; i++) {
+    int measureSize = measureChunkOffsets != null ? measureChunkOffsets.size() : 0;
+    output.writeShort(measureSize);
+    for (int i = 0; i < measureSize; i++) {
       output.writeLong(measureChunkOffsets.get(i));
     }
-    for (int i = 0; i < mSize; i++) {
+    for (int i = 0; i < measureSize; i++) {
       output.writeInt(measureChunksLength.get(i));
     }
 
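
The write() method above defines a length-prefixed layout: two longs, then a short count followed by that many chunk offsets (longs) and lengths (ints), first for dimensions and then for measures. The symmetric read order implied by that layout, shown as a sketch rather than the class's actual deserialization code:

import java.io.DataInput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

final class BlockletInfoReadSketch {
  // Mirrors the write() layout above, field for field.
  static void read(DataInput input) throws IOException {
    long dimensionOffset = input.readLong();
    long measureOffsets = input.readLong();
    int dimensionSize = input.readShort();
    List<Long> dimensionChunkOffsets = new ArrayList<>(dimensionSize);
    for (int i = 0; i < dimensionSize; i++) {
      dimensionChunkOffsets.add(input.readLong());
    }
    List<Integer> dimensionChunksLength = new ArrayList<>(dimensionSize);
    for (int i = 0; i < dimensionSize; i++) {
      dimensionChunksLength.add(input.readInt());
    }
    int measureSize = input.readShort();
    List<Long> measureChunkOffsets = new ArrayList<>(measureSize);
    for (int i = 0; i < measureSize; i++) {
      measureChunkOffsets.add(input.readLong());
    }
    List<Integer> measureChunksLength = new ArrayList<>(measureSize);
    for (int i = 0; i < measureSize; i++) {
      measureChunksLength.add(input.readInt());
    }
  }
}
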
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/converter/ThriftWrapperSchemaConverterImpl.java b/core/src/main/java/org/apache/carbondata/core/metadata/converter/ThriftWrapperSchemaConverterImpl.java
index 406e30d..fa2cf50 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/converter/ThriftWrapperSchemaConverterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/converter/ThriftWrapperSchemaConverterImpl.java
@@ -483,7 +483,7 @@
   }
 
   /* (non-Javadoc)
-   * convert from external to wrapper columnschema
+   * convert from external to wrapper column schema
    */
   @Override
   public ColumnSchema fromExternalToWrapperColumnSchema(
@@ -558,7 +558,7 @@
   }
 
   /* (non-Javadoc)
-   * convert from external to wrapper tableschema
+   * convert from external to wrapper table schema
    */
   @Override
   public TableSchema fromExternalToWrapperTableSchema(
@@ -603,7 +603,7 @@
   }
 
   /* (non-Javadoc)
-   * convert from external to wrapper tableinfo
+   * convert from external to wrapper table info
    */
   @Override
   public TableInfo fromExternalToWrapperTableInfo(
@@ -625,10 +625,10 @@
   }
 
   private List<ParentColumnTableRelation> fromExternalToWrapperParentTableColumnRelations(
-      List<org.apache.carbondata.format.ParentColumnTableRelation> thirftParentColumnRelation) {
+      List<org.apache.carbondata.format.ParentColumnTableRelation> thriftParentColumnRelation) {
     List<ParentColumnTableRelation> parentColumnTableRelationList = new ArrayList<>();
     for (org.apache.carbondata.format.ParentColumnTableRelation carbonTableRelation :
-        thirftParentColumnRelation) {
+        thriftParentColumnRelation) {
       RelationIdentifier relationIdentifier =
           new RelationIdentifier(carbonTableRelation.getRelationIdentifier().getDatabaseName(),
               carbonTableRelation.getRelationIdentifier().getTableName(),
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/SchemaEvolutionEntry.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/SchemaEvolutionEntry.java
index 33f6902..8fcfe49 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/SchemaEvolutionEntry.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/SchemaEvolutionEntry.java
@@ -23,12 +23,12 @@
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
 
 /**
- * Store the infomation about the schema evolution
+ * Store the information about the schema evolution
  */
 public class SchemaEvolutionEntry implements Serializable {
 
   /**
-   * serilization version
+   * serialization version
    */
   private static final long serialVersionUID = -7619477063676325276L;
 
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/indextable/IndexTableInfo.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/indextable/IndexTableInfo.java
index 041066b..5da81af 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/indextable/IndexTableInfo.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/indextable/IndexTableInfo.java
@@ -162,9 +162,9 @@
     IndexTableInfo[] indexTableInfos = fromGson(oldIndexIno);
     for (IndexTableInfo indexTableInfo : indexTableInfos) {
       if (indexTableInfo.tableName.equalsIgnoreCase(indexName)) {
-        Map<String, String> oldindexProperties = indexTableInfo.indexProperties;
-        oldindexProperties.put(CarbonCommonConstants.INDEX_STATUS, IndexStatus.ENABLED.name());
-        indexTableInfo.setIndexProperties(oldindexProperties);
+        Map<String, String> oldIndexProperties = indexTableInfo.indexProperties;
+        oldIndexProperties.put(CarbonCommonConstants.INDEX_STATUS, IndexStatus.ENABLED.name());
+        indexTableInfo.setIndexProperties(oldIndexProperties);
       }
     }
     return toGson(indexTableInfos);
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
index c678c73..a8cc9e9 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
@@ -95,7 +95,7 @@
   // An ordered list, same order as when creating this table by user
   private List<CarbonColumn> createOrderColumn;
 
-  // Implicit columns that for internal usage, like positionid and tupleid for update/delete
+  // Implicit columns for internal usage, like positionId and tupleId for update/delete
   // operation. see CARBON_IMPLICIT_COLUMN_POSITIONID, CARBON_IMPLICIT_COLUMN_TUPLEID
   private List<CarbonDimension> implicitDimensions;
 
@@ -146,7 +146,7 @@
   }
 
   /**
-   * During creation of TableInfo from hivemetastore the IndexSchemas and the columns
+   * During creation of TableInfo from hive metastore the IndexSchemas and the columns
    * DataTypes are not converted to the appropriate child classes.
    * This method will cast the same to the appropriate classes
    */
@@ -341,7 +341,7 @@
           complexDimension.initializeChildDimensionsList(columnSchema.getNumberOfChild());
           allDimensions.add(complexDimension);
           dimensionOrdinal =
-              readAllComplexTypeChildrens(dimensionOrdinal, columnSchema.getNumberOfChild(),
+              readAllComplexTypeChildren(dimensionOrdinal, columnSchema.getNumberOfChild(),
                   listOfColumns, complexDimension);
           i = dimensionOrdinal - 1;
           complexTypeOrdinal = assignComplexOrdinal(complexDimension, complexTypeOrdinal);
@@ -377,7 +377,7 @@
   }
 
   /**
-   * This method will add implicit dimension into carbontable
+   * This method will add implicit dimension into carbon table
    */
   private void addImplicitDimension(int dimensionOrdinal, List<CarbonDimension> dimensions) {
     dimensions.add(new CarbonImplicitDimension(dimensionOrdinal,
@@ -397,7 +397,7 @@
    * Read all primitive/complex children and set it as list of child carbon dimension to parent
    * dimension
    */
-  private int readAllComplexTypeChildrens(int dimensionOrdinal, int childCount,
+  private int readAllComplexTypeChildren(int dimensionOrdinal, int childCount,
       List<ColumnSchema> listOfColumns, CarbonDimension parentDimension) {
     for (int i = 0; i < childCount; i++) {
       ColumnSchema columnSchema = listOfColumns.get(dimensionOrdinal);
@@ -409,7 +409,7 @@
           complexDimension.initializeChildDimensionsList(columnSchema.getNumberOfChild());
           parentDimension.getListOfChildDimensions().add(complexDimension);
           dimensionOrdinal =
-              readAllComplexTypeChildrens(dimensionOrdinal, columnSchema.getNumberOfChild(),
+              readAllComplexTypeChildren(dimensionOrdinal, columnSchema.getNumberOfChild(),
                   listOfColumns, complexDimension);
         } else {
           CarbonDimension carbonDimension =
@@ -426,18 +426,18 @@
    * Read all primitive/complex children and set it as list of child carbon dimension to parent
    * dimension
    */
-  private int assignComplexOrdinal(CarbonDimension parentDimension, int complexDimensionOrdianl) {
+  private int assignComplexOrdinal(CarbonDimension parentDimension, int complexDimensionOrdinal) {
     for (int i = 0; i < parentDimension.getNumberOfChild(); i++) {
       CarbonDimension dimension = parentDimension.getListOfChildDimensions().get(i);
       if (dimension.getNumberOfChild() > 0) {
-        dimension.setComplexTypeOridnal(++complexDimensionOrdianl);
-        complexDimensionOrdianl = assignComplexOrdinal(dimension, complexDimensionOrdianl);
+        dimension.setComplexTypeOrdinal(++complexDimensionOrdinal);
+        complexDimensionOrdinal = assignComplexOrdinal(dimension, complexDimensionOrdinal);
       } else {
         parentDimension.getListOfChildDimensions().get(i)
-            .setComplexTypeOridnal(++complexDimensionOrdianl);
+            .setComplexTypeOrdinal(++complexDimensionOrdinal);
       }
     }
-    return complexDimensionOrdianl;
+    return complexDimensionOrdinal;
   }
 
   /**
@@ -448,14 +448,14 @@
   }
 
   /**
-   * @return the tabelName
+   * @return the tableName
    */
   public String getTableName() {
     return tableInfo.getFactTable().getTableName();
   }
 
   /**
-   * @return the tabelId
+   * @return the tableId
    */
   public String getTableId() {
     return tableInfo.getFactTable().getTableId();
@@ -927,7 +927,7 @@
    * methods returns true if operation is allowed for the corresponding Index or not
    * if this operation makes Index stale it is not allowed
    *
-   * @param carbonTable carbontable to be operated
+   * @param carbonTable carbon table to be operated on
    * @param operation   which operation on the table,such as drop column,change datatype.
    * @param targets     objects which the operation impact on,such as column
    * @return true allow;false not allow
@@ -950,7 +950,7 @@
       }
     } catch (Exception e) {
       // since method returns true or false and based on that calling function throws exception, no
-      // need to throw the catched exception
+      // need to throw the caught exception
       LOGGER.error(e.getMessage(), e);
       return true;
     }
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableInfo.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableInfo.java
index 05a6186..a2be5e6 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableInfo.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableInfo.java
@@ -79,9 +79,9 @@
    * or Transactional Table. The difference between Transactional and Non Transactional table is
    * Non Transactional Table will not contain any Metadata folder and subsequently
    * no TableStatus or Schema files.
-   * All ACID properties cannot be aplied to Non Transactional Table as there is no Commit points
+   * All ACID properties cannot be applied to Non Transactional Table as there are no Commit points
    * i.e. no TableStatus File.
-   * What ever files present in the path will be read but it system doesnot ensure ACID rules for
+   * Whatever files present in the path will be read but the system doesn't ensure ACID rules for
    * this data, mostly Consistency part.
    *
    */
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
index 85c7674..28c4388 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
@@ -108,9 +108,9 @@
     schema.setTableId(UUID.randomUUID().toString());
     schema.setPartitionInfo(null);
     schema.setBucketingInfo(null);
-    SchemaEvolution schemaEvol = new SchemaEvolution();
-    schemaEvol.setSchemaEvolutionEntryList(new ArrayList<SchemaEvolutionEntry>());
-    schema.setSchemaEvolution(schemaEvol);
+    SchemaEvolution schemaEvolution = new SchemaEvolution();
+    schemaEvolution.setSchemaEvolutionEntryList(new ArrayList<SchemaEvolutionEntry>());
+    schema.setSchemaEvolution(schemaEvolution);
     List<ColumnSchema> allColumns = new LinkedList<>(sortColumns);
     allColumns.addAll(dimension);
     allColumns.addAll(varCharColumns);
@@ -133,10 +133,9 @@
     if (isLocalDictionaryEnabled) {
       property.put(CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE,
           String.valueOf(isLocalDictionaryEnabled));
-      String localdictionaryThreshold = localDictionaryThreshold.equalsIgnoreCase("0") ?
-          CarbonCommonConstants.LOCAL_DICTIONARY_THRESHOLD_DEFAULT :
-          localDictionaryThreshold;
-      property.put(CarbonCommonConstants.LOCAL_DICTIONARY_THRESHOLD, localdictionaryThreshold);
+      String localDictionaryThreshold = this.localDictionaryThreshold.equalsIgnoreCase("0") ?
+          CarbonCommonConstants.LOCAL_DICTIONARY_THRESHOLD_DEFAULT : this.localDictionaryThreshold;
+      property.put(CarbonCommonConstants.LOCAL_DICTIONARY_THRESHOLD, localDictionaryThreshold);
       for (int index = 0; index < allColumns.size(); index++) {
         ColumnSchema colSchema = allColumns.get(index);
         if (colSchema.getDataType() == DataTypes.STRING
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonColumn.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonColumn.java
index b095cb2..ae2775a 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonColumn.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonColumn.java
@@ -175,7 +175,7 @@
   }
 
   /**
-   * @return columnproperty
+   * @return column property map
    */
   public Map<String, String> getColumnProperties() {
     return this.columnSchema.getColumnProperties();
@@ -216,7 +216,7 @@
     this.useActualData = useActualData;
   }
 
-  public boolean isColmatchBasedOnId(CarbonColumn queryColumn) {
+  public boolean isColumnMatchBasedOnId(CarbonColumn queryColumn) {
     return this.getColName().equalsIgnoreCase(this.getColumnId()) && this.getColName()
         .equalsIgnoreCase(queryColumn.getColName());
   }
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonDimension.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonDimension.java
index c93216b..39a706d 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonDimension.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonDimension.java
@@ -35,7 +35,7 @@
 
   /**
    * in case of dictionary dimension this will store the ordinal
-   * of the dimension in mdkey
+   * of the dimension in MDKey
    */
   private int keyOrdinal;
 
@@ -86,7 +86,7 @@
     return keyOrdinal;
   }
 
-  public void setComplexTypeOridnal(int complexTypeOrdinal) {
+  public void setComplexTypeOrdinal(int complexTypeOrdinal) {
   }
 
   /**
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonImplicitDimension.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonImplicitDimension.java
index 01222c6..b536726 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonImplicitDimension.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonImplicitDimension.java
@@ -27,7 +27,7 @@
 import org.apache.carbondata.core.metadata.encoder.Encoding;
 
 /**
- * This instance will be created for implicit column like tupleid.
+ * This instance will be created for implicit column like tupleId.
  */
 public class CarbonImplicitDimension extends CarbonDimension {
   /**
@@ -124,7 +124,7 @@
   }
 
   /**
-   * To specify the visibily of the column by default its false
+   * To specify the visibility of the column, by default it's false
    */
   public boolean isInvisible() {
     return true;
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/ColumnSchema.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/ColumnSchema.java
index c43b76f..dd4b4a0 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/ColumnSchema.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/ColumnSchema.java
@@ -137,7 +137,7 @@
   private List<ParentColumnTableRelation> parentColumnTableRelations;
 
   /**
-   * timeseries function applied on column
+   * time-series function applied on column
    */
   private String timeSeriesFunction = "";
 
@@ -419,7 +419,7 @@
   }
 
   /**
-   * return columnproperties
+   * return column properties
    */
   public Map<String, String> getColumnProperties() {
     return columnProperties;
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/ParentColumnTableRelation.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/ParentColumnTableRelation.java
index dd84ca2..fa73e88 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/ParentColumnTableRelation.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/ParentColumnTableRelation.java
@@ -40,10 +40,10 @@
 
   private String columnName;
 
-  public ParentColumnTableRelation(RelationIdentifier relationIdentifier, String columId,
+  public ParentColumnTableRelation(RelationIdentifier relationIdentifier, String columnId,
       String columnName) {
     this.relationIdentifier = relationIdentifier;
-    this.columnId = columId;
+    this.columnId = columnId;
     this.columnName = columnName;
   }
 
diff --git a/core/src/main/java/org/apache/carbondata/core/mutate/CarbonUpdateUtil.java b/core/src/main/java/org/apache/carbondata/core/mutate/CarbonUpdateUtil.java
index 77ebf3e..791c422 100644
--- a/core/src/main/java/org/apache/carbondata/core/mutate/CarbonUpdateUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/mutate/CarbonUpdateUtil.java
@@ -95,7 +95,7 @@
    */
   public static String getTableBlockPath(String tid, String tablePath, boolean isStandardTable) {
     String partField = getRequiredFieldFromTID(tid, TupleIdEnum.PART_ID);
-    // If it has segment file then partfield can be appended directly to table path
+    // If it has segment file then part field can be appended directly to table path
     if (!isStandardTable) {
       return tablePath + CarbonCommonConstants.FILE_SEPARATOR + partField.replace("#", "/");
     }
@@ -173,7 +173,7 @@
     } finally {
       if (lockStatus) {
         if (updateLock.unlock()) {
-          LOGGER.info("Unlock the segment update lock successfull.");
+          LOGGER.info("Unlocked the segment update lock successfully.");
         } else {
           LOGGER.error("Not able to unlock the segment update lock.");
         }
@@ -215,15 +215,15 @@
    * @param updatedSegmentsList
    * @param table
    * @param updatedTimeStamp
-   * @param isTimestampUpdationRequired
+   * @param isTimestampUpdateRequired
    * @param segmentsToBeDeleted
    * @return
    */
   public static boolean updateTableMetadataStatus(Set<Segment> updatedSegmentsList,
-      CarbonTable table, String updatedTimeStamp, boolean isTimestampUpdationRequired,
+      CarbonTable table, String updatedTimeStamp, boolean isTimestampUpdateRequired,
       List<Segment> segmentsToBeDeleted) {
     return updateTableMetadataStatus(updatedSegmentsList, table, updatedTimeStamp,
-        isTimestampUpdationRequired, segmentsToBeDeleted, new ArrayList<Segment>(), "");
+        isTimestampUpdateRequired, segmentsToBeDeleted, new ArrayList<Segment>(), "");
   }
 
   /**
@@ -231,12 +231,12 @@
    * @param updatedSegmentsList
    * @param table
    * @param updatedTimeStamp
-   * @param isTimestampUpdationRequired
+   * @param isTimestampUpdateRequired
    * @param segmentsToBeDeleted
    * @return
    */
   public static boolean updateTableMetadataStatus(Set<Segment> updatedSegmentsList,
-      CarbonTable table, String updatedTimeStamp, boolean isTimestampUpdationRequired,
+      CarbonTable table, String updatedTimeStamp, boolean isTimestampUpdateRequired,
       List<Segment> segmentsToBeDeleted, List<Segment> segmentFilesTobeUpdated, String uuid) {
 
     boolean status = false;
@@ -253,14 +253,14 @@
       if (lockStatus) {
         LOGGER.info(
                 "Acquired lock for table" + table.getDatabaseName() + "." + table.getTableName()
-                        + " for table status updation");
+                        + " for table status update");
 
         LoadMetadataDetails[] listOfLoadFolderDetailsArray =
                 SegmentStatusManager.readLoadMetadata(metaDataFilepath);
 
         for (LoadMetadataDetails loadMetadata : listOfLoadFolderDetailsArray) {
 
-          if (isTimestampUpdationRequired) {
+          if (isTimestampUpdateRequired) {
             // we are storing the link between the 2 status files in the segment 0 only.
             if (loadMetadata.getLoadName().equalsIgnoreCase("0")) {
               loadMetadata.setUpdateStatusFileName(
@@ -270,14 +270,14 @@
             // if the segments is in the list of marked for delete then update the status.
             if (segmentsToBeDeleted.contains(new Segment(loadMetadata.getLoadName()))) {
               loadMetadata.setSegmentStatus(SegmentStatus.MARKED_FOR_DELETE);
-              loadMetadata.setModificationOrdeletionTimesStamp(Long.parseLong(updatedTimeStamp));
+              loadMetadata.setModificationOrDeletionTimestamp(Long.parseLong(updatedTimeStamp));
             }
           }
           for (Segment segName : updatedSegmentsList) {
             if (loadMetadata.getLoadName().equalsIgnoreCase(segName.getSegmentNo())) {
               // if this call is coming from the delete delta flow then the time stamp
               // String will come empty then no need to write into table status file.
-              if (isTimestampUpdationRequired) {
+              if (isTimestampUpdateRequired) {
                 // if in case of update flow.
                 if (loadMetadata.getUpdateDeltaStartTimestamp().isEmpty()) {
                   // this means for first time it is getting updated .
@@ -304,19 +304,19 @@
 
         status = true;
       } else {
-        LOGGER.error("Not able to acquire the lock for Table status updation for table " + table
+        LOGGER.error("Not able to acquire the lock for Table status update for table " + table
                 .getDatabaseName() + "." + table.getTableName());
       }
     } finally {
       if (lockStatus) {
         if (carbonLock.unlock()) {
           LOGGER.info(
-                 "Table unlocked successfully after table status updation" + table.getDatabaseName()
+                 "Table unlocked successfully after table status update " + table.getDatabaseName()
                           + "." + table.getTableName());
         } else {
           LOGGER.error(
                   "Unable to unlock Table lock for table" + table.getDatabaseName() + "." + table
-                          .getTableName() + " during table status updation");
+                          .getTableName() + " during table status update");
         }
       }
     }
@@ -375,14 +375,14 @@
   /**
    * returns timestamp as long value
    *
-   * @param timtstamp
+   * @param timestamp
    * @return
    */
-  public static Long getTimeStampAsLong(String timtstamp) {
+  public static Long getTimeStampAsLong(String timestamp) {
     try {
-      return Long.parseLong(timtstamp);
+      return Long.parseLong(timestamp);
     } catch (NumberFormatException nfe) {
-      String errorMsg = "Invalid timestamp : " + timtstamp;
+      String errorMsg = "Invalid timestamp : " + timestamp;
       LOGGER.error(errorMsg);
       return null;
     }
@@ -473,7 +473,7 @@
   }
 
   /**
-   * Handling of the clean up of old carbondata files, index files , delte delta,
+   * Handling of the clean up of old carbondata files, index files, delete delta,
    * update status files.
    * @param table clean up will be handled on this table.
    * @param forceDelete if true then max query execution timeout will not be considered.
@@ -521,7 +521,7 @@
               FileFactory.getCarbonFile(segmentPath);
           CarbonFile[] allSegmentFiles = segDir.listFiles();
 
-          // scan through the segment and find the carbondatafiles and index files.
+          // scan through the segment and find the carbon data files and index files.
           boolean updateSegmentFile = false;
           // deleting of the aborted file scenario.
           if (deleteStaleCarbonDataFiles(segment, allSegmentFiles, updateStatusManager)) {
@@ -719,7 +719,7 @@
 
   /**
    * This function deletes all the stale carbondata files during clean up before update operation
-   * one scenario is if update operation is ubruptly stopped before updation of table status then
+   * one scenario is if the update operation is abruptly stopped before the table status update, then
    * the carbondata file created during update operation is stale file and it will be deleted in
    * this function in next update operation
    * @param segment
diff --git a/core/src/main/java/org/apache/carbondata/core/mutate/SegmentUpdateDetails.java b/core/src/main/java/org/apache/carbondata/core/mutate/SegmentUpdateDetails.java
index abe8f6b..2c5fd9e 100644
--- a/core/src/main/java/org/apache/carbondata/core/mutate/SegmentUpdateDetails.java
+++ b/core/src/main/java/org/apache/carbondata/core/mutate/SegmentUpdateDetails.java
@@ -180,16 +180,16 @@
   /**
    * returns timestamp as long value
    *
-   * @param timtstamp
+   * @param timestamp
    * @return
    */
-  private Long getTimeStampAsLong(String timtstamp) {
+  private Long getTimeStampAsLong(String timestamp) {
     long longValue = 0;
     try {
-      longValue = Long.parseLong(timtstamp);
+      longValue = Long.parseLong(timestamp);
     } catch (NumberFormatException nfe) {
       if (LOGGER.isDebugEnabled()) {
-        String errorMsg = "Invalid timestamp : " + timtstamp;
+        String errorMsg = "Invalid timestamp : " + timestamp;
         LOGGER.debug(errorMsg);
       }
     }
diff --git a/core/src/main/java/org/apache/carbondata/core/mutate/data/BlockMappingVO.java b/core/src/main/java/org/apache/carbondata/core/mutate/data/BlockMappingVO.java
index 9f1c713..70a9136 100644
--- a/core/src/main/java/org/apache/carbondata/core/mutate/data/BlockMappingVO.java
+++ b/core/src/main/java/org/apache/carbondata/core/mutate/data/BlockMappingVO.java
@@ -31,7 +31,7 @@
   private Map<String, RowCountDetailsVO> completeBlockRowDetailVO;
 
   // This map will help us to finding the segment id from the block path.
-  // key is 'blockpath' and value is 'segmentId'
+  // key is 'blockPath' and value is 'segmentId'
   private Map<String, String> blockToSegmentMapping;
 
   public void setCompleteBlockRowDetailVO(Map<String, RowCountDetailsVO> completeBlockRowDetailVO) {
diff --git a/core/src/main/java/org/apache/carbondata/core/preagg/TimeSeriesFunctionEnum.java b/core/src/main/java/org/apache/carbondata/core/preagg/TimeSeriesFunctionEnum.java
index f30923b..7cd5835 100644
--- a/core/src/main/java/org/apache/carbondata/core/preagg/TimeSeriesFunctionEnum.java
+++ b/core/src/main/java/org/apache/carbondata/core/preagg/TimeSeriesFunctionEnum.java
@@ -18,7 +18,7 @@
 package org.apache.carbondata.core.preagg;
 
 /**
- * enum for timeseries function
+ * enum for time-series function
  */
 public enum TimeSeriesFunctionEnum {
   SECOND("second", 0),
diff --git a/core/src/main/java/org/apache/carbondata/core/preagg/TimeSeriesUDF.java b/core/src/main/java/org/apache/carbondata/core/preagg/TimeSeriesUDF.java
index 36c0260..ce8c108 100644
--- a/core/src/main/java/org/apache/carbondata/core/preagg/TimeSeriesUDF.java
+++ b/core/src/main/java/org/apache/carbondata/core/preagg/TimeSeriesUDF.java
@@ -30,7 +30,7 @@
 import org.apache.log4j.Logger;
 
 /**
- * class for applying timeseries udf
+ * class for applying time-series udf
  */
 public class TimeSeriesUDF {
 
@@ -40,7 +40,7 @@
   public final List<String> TIMESERIES_FUNCTION = new ArrayList<>();
 
   // thread local for keeping calender instance
-  private ThreadLocal<Calendar> calanderThreadLocal = new ThreadLocal<>();
+  private ThreadLocal<Calendar> calendarThreadLocal = new ThreadLocal<>();
 
   /**
    * singleton instance
@@ -73,7 +73,7 @@
       return data;
     }
     initialize();
-    Calendar calendar = calanderThreadLocal.get();
+    Calendar calendar = calendarThreadLocal.get();
     calendar.clear();
     calendar.setTimeInMillis(data.getTime());
     TimeSeriesFunctionEnum timeSeriesFunctionEnum =
@@ -132,7 +132,7 @@
         calendar.set(Calendar.MILLISECOND, 0);
         break;
       default:
-        throw new IllegalArgumentException("Invalid timeseries function name: " + function);
+        throw new IllegalArgumentException("Invalid time-series function name: " + function);
     }
     data.setTime(calendar.getTimeInMillis());
     return data;
@@ -151,8 +151,8 @@
    * Below method will be used to initialize the thread local
    */
   private void initialize() {
-    if (calanderThreadLocal.get() == null) {
-      calanderThreadLocal.set(new GregorianCalendar());
+    if (calendarThreadLocal.get() == null) {
+      calendarThreadLocal.set(new GregorianCalendar());
     }
     if (TIMESERIES_FUNCTION.isEmpty()) {
       TIMESERIES_FUNCTION.add("second");
@@ -180,6 +180,6 @@
           DaysOfWeekEnum.valueOf(CarbonCommonConstants.CARBON_TIMESERIES_FIRST_DAY_OF_WEEK_DEFAULT)
               .getOrdinal();
     }
-    calanderThreadLocal.get().setFirstDayOfWeek(firstDayOfWeek);
+    calendarThreadLocal.get().setFirstDayOfWeek(firstDayOfWeek);
   }
 }
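
Worth noting: the null-check-then-set pattern in initialize() is exactly what ThreadLocal.withInitial was designed to replace on Java 8+. A hedged sketch of the same per-thread Calendar idea, with a made-up truncation helper mirroring the DAY-style branches of the switch above:

import java.util.Calendar;
import java.util.GregorianCalendar;

final class CalendarThreadLocalSketch {
  // One Calendar per thread, created lazily on first get().
  private static final ThreadLocal<Calendar> CALENDAR =
      ThreadLocal.withInitial(GregorianCalendar::new);

  // Hypothetical helper: truncate a timestamp to day granularity.
  static long truncateToDay(long timeInMillis) {
    Calendar calendar = CALENDAR.get();
    calendar.clear();
    calendar.setTimeInMillis(timeInMillis);
    calendar.set(Calendar.HOUR_OF_DAY, 0);
    calendar.set(Calendar.MINUTE, 0);
    calendar.set(Calendar.SECOND, 0);
    calendar.set(Calendar.MILLISECOND, 0);
    return calendar.getTimeInMillis();
  }
}
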
diff --git a/core/src/main/java/org/apache/carbondata/core/profiler/ExplainCollector.java b/core/src/main/java/org/apache/carbondata/core/profiler/ExplainCollector.java
index 4c73e37..a2bc865 100644
--- a/core/src/main/java/org/apache/carbondata/core/profiler/ExplainCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/profiler/ExplainCollector.java
@@ -159,7 +159,7 @@
     return output;
   }
 
-  public static String getFormatedOutput() {
+  public static String getFormattedOutput() {
     if (null != get()) {
       return get().toString();
     } else {
diff --git a/core/src/main/java/org/apache/carbondata/core/readcommitter/TableStatusReadCommittedScope.java b/core/src/main/java/org/apache/carbondata/core/readcommitter/TableStatusReadCommittedScope.java
index 4c73833..815efee 100644
--- a/core/src/main/java/org/apache/carbondata/core/readcommitter/TableStatusReadCommittedScope.java
+++ b/core/src/main/java/org/apache/carbondata/core/readcommitter/TableStatusReadCommittedScope.java
@@ -114,7 +114,7 @@
   @Override
   public void takeCarbonIndexFileSnapShot() throws IOException {
     // Only Segment Information is updated.
-    // File information will be fetched on the fly according to the fecthed segment info.
+    // File information will be fetched on the fly according to the fetched segment info.
     this.loadMetadataDetails = SegmentStatusManager
         .readTableStatusFile(CarbonTablePath.getTableStatusFilePath(identifier.getTablePath()));
   }
diff --git a/core/src/main/java/org/apache/carbondata/core/reader/CarbonDeleteFilesDataReader.java b/core/src/main/java/org/apache/carbondata/core/reader/CarbonDeleteFilesDataReader.java
index 686b0fa..e05e581 100644
--- a/core/src/main/java/org/apache/carbondata/core/reader/CarbonDeleteFilesDataReader.java
+++ b/core/src/main/java/org/apache/carbondata/core/reader/CarbonDeleteFilesDataReader.java
@@ -116,11 +116,11 @@
 
   /**
    * Below method will be used to read the delete delta files
-   * and get the map of blockletid and page id mapping to deleted
+   * and get the map of blockletId and pageId mapping to deleted
    * rows
    *
    * @param deltaFiles delete delta files array
-   * @return map of blockletid_pageid to deleted rows
+   * @return map of blockletId_pageId to deleted rows
    */
   public Map<String, DeleteDeltaVo> getDeletedRowsDataVo(String[] deltaFiles) {
     List<Future<DeleteDeltaBlockDetails>> taskSubmitList = new ArrayList<>();
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/ResultCollectorFactory.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/ResultCollectorFactory.java
index 56985fe..c71722b 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/ResultCollectorFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/ResultCollectorFactory.java
@@ -55,7 +55,7 @@
     if (blockExecutionInfo.isRawRecordDetailQuery()) {
       if (blockExecutionInfo.isRestructuredBlock()) {
         if (blockExecutionInfo.isRequiredRowId()) {
-          LOGGER.info("RowId Restructure based raw ollector is used to scan and collect the data");
+          LOGGER.info("RowId Restructure based raw collector is used to scan and collect the data");
           scannerResultAggregator = new RowIdRestructureBasedRawResultCollector(blockExecutionInfo);
         } else {
           LOGGER.info("Restructure based raw collector is used to scan and collect the data");
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
index 1890f4b..947a561 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
@@ -73,11 +73,11 @@
   private byte[][] noDictionaryKeys;
   private byte[][] complexTypeKeyArray;
 
-  protected Map<Integer, GenericQueryType> comlexDimensionInfoMap;
+  protected Map<Integer, GenericQueryType> complexDimensionInfoMap;
 
   /**
    * Field of this Map is the parent Column and associated child columns.
-   * Final Projection shuld be a merged list consist of only parents.
+   * Final Projection should be a merged list consisting of only parents.
    */
   private Map<Integer, List<Integer>> parentToChildColumnsMap = new HashMap<>();
 
@@ -101,7 +101,7 @@
     queryMeasures = executionInfo.getProjectionMeasures();
     initDimensionAndMeasureIndexesForFillingData();
     isDimensionExists = queryDimensions.length > 0;
-    this.comlexDimensionInfoMap = executionInfo.getComlexDimensionInfoMap();
+    this.complexDimensionInfoMap = executionInfo.getComplexDimensionInfoMap();
     this.readOnlyDelta = executionInfo.isReadOnlyDelta();
   }
 
@@ -157,7 +157,7 @@
         fillComplexColumnDataBufferForThisRow();
         for (int i = 0; i < queryDimensions.length; i++) {
           fillDimensionData(scannedResult, surrogateResult, noDictionaryKeys, complexTypeKeyArray,
-              comlexDimensionInfoMap, row, i, queryDimensions[i].getDimension().getOrdinal());
+              complexDimensionInfoMap, row, i, queryDimensions[i].getDimension().getOrdinal());
         }
       }
       fillMeasureData(scannedResult, row);
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java
index 9f18894..84c9cd0 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java
@@ -70,7 +70,7 @@
     super(blockExecutionInfos);
     this.isDirectVectorFill = blockExecutionInfos.isDirectVectorFill();
     if (this.isDirectVectorFill) {
-      LOGGER.info("Direct pagewise vector fill collector is used to scan and collect the data");
+      LOGGER.info("Direct page-wise vector fill collector is used to scan and collect the data");
     }
     // initialize only if the current block is not a restructured block else the initialization
     // will be taken care by RestructureBasedVectorResultCollector
@@ -118,7 +118,7 @@
         columnVectorInfo.dimension = queryDimensions[i];
         columnVectorInfo.ordinal = queryDimensions[i].getDimension().getOrdinal();
         columnVectorInfo.genericQueryType =
-            executionInfo.getComlexDimensionInfoMap().get(columnVectorInfo.ordinal);
+            executionInfo.getComplexDimensionInfoMap().get(columnVectorInfo.ordinal);
         allColumnInfo[queryDimensions[i].getOrdinal()] = columnVectorInfo;
       } else {
         ColumnVectorInfo columnVectorInfo = new ColumnVectorInfo();
@@ -164,7 +164,7 @@
     if (isDirectVectorFill) {
       collectResultInColumnarBatchDirect(scannedResult, columnarBatch);
     } else {
-      int numberOfPages = scannedResult.numberOfpages();
+      int numberOfPages = scannedResult.numberOfPages();
       int filteredRows = 0;
       while (scannedResult.getCurrentPageCounter() < numberOfPages) {
         int currentPageRowCount = scannedResult.getCurrentPageRowCount();
@@ -226,7 +226,7 @@
    */
   private void collectResultInColumnarBatchDirect(BlockletScannedResult scannedResult,
       CarbonColumnarBatch columnarBatch) {
-    int numberOfPages = scannedResult.numberOfpages();
+    int numberOfPages = scannedResult.numberOfPages();
     while (scannedResult.getCurrentPageCounter() < numberOfPages) {
       int currentPageRowCount = scannedResult.getCurrentPageRowCount();
       if (currentPageRowCount == 0) {
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RawBasedResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RawBasedResultCollector.java
index c4851c7..4bdc19d 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RawBasedResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RawBasedResultCollector.java
@@ -67,7 +67,7 @@
    */
   protected void scanAndFillData(BlockletScannedResult scannedResult, int batchSize,
       List<Object[]> listBasedResult, ProjectionMeasure[] queryMeasures) {
-    int numberOfPages = scannedResult.numberOfpages();
+    int numberOfPages = scannedResult.numberOfPages();
     // loop will exit once the batchSize data has been read or the pages have been exhausted
     while (scannedResult.getCurrentPageCounter() < numberOfPages) {
       int currentPageRowCount = scannedResult.getCurrentPageRowCount();
@@ -88,7 +88,7 @@
       if (batchSize > availableRows) {
         batchSize = batchSize - availableRows;
       } else {
-        // this is done because in IUD cases actuals rows fetch can be less than batch size as
+        // this is done because in IUD cases the actual rows fetched can be less than batch size as
         // some of the rows could have been deleted. So in those cases batchSize needs to be
         // re-initialized with the leftover value
         batchSize = 0;
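
For context, the leftover-batch handling described in the comment above can be sketched as
follows. This is an illustrative fragment with a hypothetical page iterator, not the actual
CarbonData classes:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

final class PageBatcherSketch {
  // Collect up to batchSize rows across pages. Deleted rows (the IUD case)
  // shrink what a page contributes, so the remaining count carries over to
  // the next page instead of the loop overshooting the batch.
  static List<Object[]> nextBatch(Iterator<List<Object[]>> pages, int batchSize) {
    List<Object[]> batch = new ArrayList<>();
    while (pages.hasNext() && batchSize > 0) {
      List<Object[]> page = pages.next();          // rows surviving the delete delta
      int availableRows = page.size();
      if (batchSize > availableRows) {
        batch.addAll(page);                        // take the whole page
        batchSize = batchSize - availableRows;     // keep asking for the rest
      } else {
        batch.addAll(page.subList(0, batchSize));  // take only what is still needed
        batchSize = 0;                             // the batch is full
      }
    }
    return batch;
  }
}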
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedDictionaryResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedDictionaryResultCollector.java
index fa0b2a9..1422aac 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedDictionaryResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedDictionaryResultCollector.java
@@ -71,8 +71,8 @@
     int[] surrogateResult;
     byte[][] noDictionaryKeys;
     byte[][] complexTypeKeyArray;
-    Map<Integer, GenericQueryType> comlexDimensionInfoMap =
-        executionInfo.getComlexDimensionInfoMap();
+    Map<Integer, GenericQueryType> complexDimensionInfoMap =
+        executionInfo.getComplexDimensionInfoMap();
     while (scannedResult.hasNext() && rowCounter < batchSize) {
       scannedResult.incrementCounter();
       if (scannedResult.containsDeletedRow(scannedResult.getCurrentRowId())) {
@@ -102,7 +102,7 @@
             continue;
           }
           fillDimensionData(scannedResult, surrogateResult, noDictionaryKeys, complexTypeKeyArray,
-              comlexDimensionInfoMap, row, i, executionInfo
+              complexDimensionInfoMap, row, i, executionInfo
                   .getProjectionDimensions()[segmentDimensionsIdx++].getDimension().getOrdinal());
         }
       }
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedVectorResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedVectorResultCollector.java
index 7d70517..22e41ca 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedVectorResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedVectorResultCollector.java
@@ -103,7 +103,7 @@
   @Override
   public void collectResultInColumnarBatch(BlockletScannedResult scannedResult,
       CarbonColumnarBatch columnarBatch) {
-    int numberOfPages = scannedResult.numberOfpages();
+    int numberOfPages = scannedResult.numberOfPages();
     while (scannedResult.getCurrentPageCounter() < numberOfPages) {
       int currentPageRowCount = scannedResult.getCurrentPageRowCount();
       if (currentPageRowCount == 0) {
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RowIdBasedResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RowIdBasedResultCollector.java
index 7a0732b..857b2f5 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RowIdBasedResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RowIdBasedResultCollector.java
@@ -61,7 +61,7 @@
         complexTypeColumnIndex = 0;
         for (int i = 0; i < queryDimensions.length; i++) {
           fillDimensionData(scannedResult, surrogateResult, noDictionaryKeys, complexTypeKeyArray,
-              comlexDimensionInfoMap, row, i, queryDimensions[i].getDimension().getOrdinal());
+              complexDimensionInfoMap, row, i, queryDimensions[i].getDimension().getOrdinal());
         }
       }
       row[columnCount + 2] = scannedResult.getCurrentRowId();
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RowIdRawBasedResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RowIdRawBasedResultCollector.java
index a5625e2..fd23bb9 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RowIdRawBasedResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RowIdRawBasedResultCollector.java
@@ -32,10 +32,10 @@
 
 /**
  * It is not a collector; it is just a scanned result holder.
- * most of the lines are copyied from `RawBasedResultCollector`, the difference in function is that
+ * Most of the lines are copied from `RawBasedResultCollector`; the difference in function is that
  * this class returns all the dimensions in a ByteArrayWrapper and appends blockletNo/PageId/RowId
  * at the end of the row.
- * This implementation refers to `RawBasedResultCollector` and `RowIdBaedResultCollector`
+ * This implementation refers to `RawBasedResultCollector` and `RowIdBasedResultCollector`
  */
 @InterfaceAudience.Internal
 public class RowIdRawBasedResultCollector extends AbstractScannedResultCollector {
@@ -73,7 +73,7 @@
    */
   protected void scanAndFillData(BlockletScannedResult scannedResult, int batchSize,
       List<Object[]> listBasedResult, ProjectionMeasure[] queryMeasures) {
-    int numberOfPages = scannedResult.numberOfpages();
+    int numberOfPages = scannedResult.numberOfPages();
     // loop will exit once the batchSize data has been read or the pages have been exhausted
     while (scannedResult.getCurrentPageCounter() < numberOfPages) {
       int currentPageRowCount = scannedResult.getCurrentPageRowCount();
@@ -94,7 +94,7 @@
       if (batchSize > availableRows) {
         batchSize = batchSize - availableRows;
       } else {
-        // this is done because in IUD cases actuals rows fetch can be less than batch size as
+        // this is done because in IUD cases the actual rows fetched can be less than batch size as
         // some of the rows could have been deleted. So in those cases batchSize needs to be
         // re-initialized with the leftover value
         batchSize = 0;
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RowIdRestructureBasedRawResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RowIdRestructureBasedRawResultCollector.java
index a8d99d2..592e439 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RowIdRestructureBasedRawResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RowIdRestructureBasedRawResultCollector.java
@@ -45,7 +45,7 @@
   @Override
   protected void scanAndFillData(BlockletScannedResult scannedResult, int batchSize,
                                List<Object[]> listBasedResult, ProjectionMeasure[] queryMeasures) {
-    int numberOfPages = scannedResult.numberOfpages();
+    int numberOfPages = scannedResult.numberOfPages();
     // loop will exit once the batchSize data has been read or the pages have been exhausted
     while (scannedResult.getCurrentPageCounter() < numberOfPages) {
       int currentPageRowCount = scannedResult.getCurrentPageRowCount();
@@ -66,7 +66,7 @@
       if (batchSize > availableRows) {
         batchSize = batchSize - availableRows;
       } else {
-        // this is done because in IUD cases actuals rows fetch can be less than batch size as
+        // this is done because in IUD cases the actual rows fetched can be less than batch size as
         // some of the rows could have been deleted. So in those cases batchSize needs to be
         // re-initialized with the leftover value
         batchSize = 0;
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
index c1dda69..0623177 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
@@ -238,8 +238,8 @@
   * It updates dimensions and measures of the query model. In a few scenarios, like SDK, the user
   * can configure sort options per load, so if the first load has c1 as an integer column configured
   * as a sort column then carbon treats that as a dimension. But in the second load, if the user changes the sort option, then
-   * c1 become measure as bydefault integers are measures. So this method updates the measures to
-   * dimensions and vice versa as per the indexfile schema.
+   * c1 becomes a measure, as by default integers are measures. So this method updates the measures
+   * to dimensions and vice versa as per the index file schema.
    */
   private void updateColumns(QueryModel queryModel, List<ColumnSchema> columnsInTable,
       String filePath) throws IOException {
@@ -262,7 +262,7 @@
     List<ProjectionDimension> updatedDims = new ArrayList<>();
     List<ProjectionMeasure> updatedMsrs = new ArrayList<>();
 
-    // Check and update dimensions to measures if it is measure in indexfile schema
+    // Check and update dimensions to measures if it is a measure in the index file schema
     for (ProjectionDimension dimension : dimensions) {
       int index = columnsInTable.indexOf(dimension.getDimension().getColumnSchema());
       if (index > -1) {
@@ -280,7 +280,7 @@
       }
     }
 
-    // Check and update measure to dimension if it is dimension in indexfile schema.
+    // Check and update measure to dimension if it is a dimension in the index file schema.
     for (ProjectionMeasure measure : measures) {
       int index = columnsInTable.indexOf(measure.getMeasure().getColumnSchema());
       if (index > -1) {
@@ -491,9 +491,9 @@
         // loading the filter executor tree for filter evaluation
         filterResolverIntf = queryModel.getIndexFilter().getResolver();
       }
-      blockExecutionInfo.setFilterExecuterTree(
-          FilterUtil.getFilterExecuterTree(filterResolverIntf, segmentProperties,
-              blockExecutionInfo.getComlexDimensionInfoMap(), false));
+      blockExecutionInfo.setFilterExecutorTree(
+          FilterUtil.getFilterExecutorTree(filterResolverIntf, segmentProperties,
+              blockExecutionInfo.getComplexDimensionInfoMap(), false));
     }
     // expression measure
     List<CarbonMeasure> expressionMeasures =
@@ -501,13 +501,13 @@
     // setting all the dimension chunk indexes to be read from file
     int numberOfElementToConsider = 0;
     // list of dimensions to be projected
-    Set<Integer> allProjectionListDimensionIdexes = new LinkedHashSet<>();
+    Set<Integer> allProjectionListDimensionIndexes = new LinkedHashSet<>();
     // create a list of filter dimensions present in the current block
     Set<CarbonDimension> currentBlockFilterDimensions =
         getCurrentBlockFilterDimensions(queryProperties.complexFilterDimension, segmentProperties);
     int[] dimensionChunkIndexes = QueryUtil.getDimensionChunkIndexes(projectDimensions,
         segmentProperties.getDimensionOrdinalToChunkMapping(),
-        currentBlockFilterDimensions, allProjectionListDimensionIdexes);
+        currentBlockFilterDimensions, allProjectionListDimensionIndexes);
     ReusableDataBuffer[] dimensionBuffer = new ReusableDataBuffer[projectDimensions.size()];
     for (int i = 0; i < dimensionBuffer.length; i++) {
       dimensionBuffer[i] = new ReusableDataBuffer();
@@ -515,11 +515,11 @@
     blockExecutionInfo.setDimensionReusableDataBuffer(dimensionBuffer);
     int numberOfColumnToBeReadInOneIO = Integer.parseInt(CarbonProperties.getInstance()
         .getProperty(CarbonV3DataFormatConstants.NUMBER_OF_COLUMN_TO_READ_IN_IO,
-            CarbonV3DataFormatConstants.NUMBER_OF_COLUMN_TO_READ_IN_IO_DEFAULTVALUE));
+            CarbonV3DataFormatConstants.NUMBER_OF_COLUMN_TO_READ_IN_IO_DEFAULT_VALUE));
 
     if (dimensionChunkIndexes.length > 0) {
       numberOfElementToConsider = dimensionChunkIndexes[dimensionChunkIndexes.length - 1]
-          == segmentProperties.getBlockTodimensionOrdinalMapping().size() - 1 ?
+          == segmentProperties.getBlockToDimensionOrdinalMapping().size() - 1 ?
           dimensionChunkIndexes.length - 1 :
           dimensionChunkIndexes.length;
       blockExecutionInfo.setAllSelectedDimensionColumnIndexRange(
@@ -559,8 +559,8 @@
     }
     // setting the indexes of list of dimension in projection list
     blockExecutionInfo.setProjectionListDimensionIndexes(ArrayUtils.toPrimitive(
-        allProjectionListDimensionIdexes
-            .toArray(new Integer[allProjectionListDimensionIdexes.size()])));
+        allProjectionListDimensionIndexes
+            .toArray(new Integer[allProjectionListDimensionIndexes.size()])));
     // setting the indexes of list of measures in projection list
     blockExecutionInfo.setProjectionListMeasureIndexes(ArrayUtils.toPrimitive(
         allProjectionListMeasureIndexes
@@ -720,7 +720,7 @@
         exceptionOccurred = e;
       }
     }
-    // clear all the unsafe memory used for the given task ID only if it is neccessary to be cleared
+    // clear all the unsafe memory used for the given task ID only if it is necessary to be cleared
     if (freeUnsafeMemory) {
       UnsafeMemoryManager.INSTANCE
           .freeMemoryAll(ThreadLocalTaskInfo.getCarbonTaskInfo().getTaskId());
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/infos/BlockExecutionInfo.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/infos/BlockExecutionInfo.java
index e3b4cd9..9755761 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/executor/infos/BlockExecutionInfo.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/infos/BlockExecutionInfo.java
@@ -24,7 +24,7 @@
 import org.apache.carbondata.core.datastore.block.AbstractIndex;
 import org.apache.carbondata.core.mutate.DeleteDeltaVo;
 import org.apache.carbondata.core.scan.filter.GenericQueryType;
-import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.core.scan.filter.executer.FilterExecutor;
 import org.apache.carbondata.core.scan.model.ProjectionDimension;
 import org.apache.carbondata.core.scan.model.ProjectionMeasure;
 import org.apache.carbondata.core.stats.QueryStatisticsModel;
@@ -105,7 +105,7 @@
   /**
    * filter tree to execute the filter
    */
-  private FilterExecuter filterExecuterTree;
+  private FilterExecutor filterExecutorTree;
 
   /**
   * whether it needs only raw byte records without aggregation.
@@ -335,15 +335,15 @@
   /**
   * @return the filterExecutorTree
    */
-  public FilterExecuter getFilterExecuterTree() {
-    return filterExecuterTree;
+  public FilterExecutor getFilterExecutorTree() {
+    return filterExecutorTree;
   }
 
   /**
-   * @param filterExecuterTree the filterEvaluatorTree to set
+   * @param filterExecutorTree the filterExecutorTree to set
    */
-  public void setFilterExecuterTree(FilterExecuter filterExecuterTree) {
-    this.filterExecuterTree = filterExecuterTree;
+  public void setFilterExecutorTree(FilterExecutor filterExecutorTree) {
+    this.filterExecutorTree = filterExecutorTree;
   }
 
   /**
@@ -385,7 +385,7 @@
   /**
    * @return the complexParentIndexToQueryMap
    */
-  public Map<Integer, GenericQueryType> getComlexDimensionInfoMap() {
+  public Map<Integer, GenericQueryType> getComplexDimensionInfoMap() {
     return complexParentIndexToQueryMap;
   }
 
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java
index 53a9358..1b10817 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java
@@ -126,12 +126,12 @@
    */
   public static int[] getMeasureChunkIndexes(List<ProjectionMeasure> queryMeasures,
       List<CarbonMeasure> expressionMeasure, Map<Integer, Integer> ordinalToBlockIndexMapping,
-      Set<CarbonMeasure> filterMeasures, List<Integer> allProjectionListMeasureIdexes) {
+      Set<CarbonMeasure> filterMeasures, List<Integer> allProjectionListMeasureIndexes) {
     Set<Integer> measureChunkIndex = new HashSet<Integer>();
     Set<Integer> filterMeasureOrdinal = getFilterMeasureOrdinal(filterMeasures);
     for (int i = 0; i < queryMeasures.size(); i++) {
       Integer measureOrdinal = queryMeasures.get(i).getMeasure().getOrdinal();
-      allProjectionListMeasureIdexes.add(measureOrdinal);
+      allProjectionListMeasureIndexes.add(measureOrdinal);
       if (!filterMeasureOrdinal.contains(measureOrdinal)) {
         measureChunkIndex.add(ordinalToBlockIndexMapping.get(measureOrdinal));
       }
@@ -422,9 +422,9 @@
   }
 
   /**
-   * In case of non transactional table just set columnuniqueid as columnName to support
-   * backward compatabiity. non transactional tables column uniqueid is always equal to
-   * columnname
+   * In case of a non-transactional table, just set the column unique id as columnName to support
+   * backward compatibility. For non-transactional tables, the column unique id is always equal to
+   * the column name
    */
   public static void updateColumnUniqueIdForNonTransactionTable(List<ColumnSchema> columnSchemas) {
     for (ColumnSchema columnSchema : columnSchemas) {
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/util/RestructureUtil.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/util/RestructureUtil.java
index 921e34d..f90ba32 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/executor/util/RestructureUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/util/RestructureUtil.java
@@ -52,7 +52,7 @@
 public class RestructureUtil {
 
   /**
-   * Below method will be used to get the updated query dimension updation
+   * Below method will be used to get the updated query dimensions. Update
   * means, after restructuring, some dimensions will not be present in older
   * table blocks; in that case we need to select only those dimensions out of
   * the query dimensions which are present in the current table block
@@ -168,7 +168,7 @@
     if (tableColumn.getDataType().isComplexType() && !(tableColumn.getDataType().getId()
         == DataTypes.ARRAY_TYPE_ID)) {
       if (tableColumn.getColumnId().equalsIgnoreCase(queryColumn.getColumnId()) || tableColumn
-          .isColmatchBasedOnId(queryColumn)) {
+          .isColumnMatchBasedOnId(queryColumn)) {
         return true;
       } else {
         return isColumnMatchesStruct(tableColumn, queryColumn);
@@ -180,12 +180,12 @@
           // In case of SDK, columnId is same as columnName therefore the following check will
           // ensure that if the table columnName is same as the query columnName and the table
           // columnId is the same as table columnName then it's a valid columnName to be scanned.
-          || tableColumn.isColmatchBasedOnId(queryColumn));
+          || tableColumn.isColumnMatchBasedOnId(queryColumn));
     }
   }
 
   /**
-   * In case of Multilevel Complex column - STRUCT/STRUCTofSTRUCT, traverse all the child dimension
+   * In case of Multilevel Complex column - Struct/StructOfStruct, traverse all the child dimensions
    * to check column Id
    *
    * @param tableColumn
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/expression/Expression.java b/core/src/main/java/org/apache/carbondata/core/scan/expression/Expression.java
index 2513b0d..24c6312 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/expression/Expression.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/expression/Expression.java
@@ -34,7 +34,7 @@
       new ArrayList<Expression>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
 
   // When a filter expression already has the dictionary surrogate values in
-  // it then we set isAlreadyResolved as true so that we donot resolve the
+  // it then we set isAlreadyResolved as true so that we do not resolve the
   // filter expression in further steps.
   protected boolean isAlreadyResolved;
 
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/expression/RangeExpressionEvaluator.java b/core/src/main/java/org/apache/carbondata/core/scan/expression/RangeExpressionEvaluator.java
index 88667eb..be3c9fa 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/expression/RangeExpressionEvaluator.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/expression/RangeExpressionEvaluator.java
@@ -77,13 +77,13 @@
   }
 
   /**
-   * This method evaluates is any greaterthan or less than expression can be transformed
+   * This method evaluates whether any greater than or less than expression can be transformed
    * into a single RANGE filter.
    */
   public void rangeExpressionEvaluatorMapBased() {
     // The algorithm:
     // Get all the nodes of the Expression Tree and fill it into a MAP.
-    // The Map structure will be currentNode, ColumnName, LessThanOrgreaterThan, Value, ParentNode
+    // The Map structure will be currentNode, ColumnName, LessThanOrGreaterThan, Value, ParentNode
     // Group the rows in MAP according to the columns and then evaluate if it can be transformed
     // into a RANGE or not.
     //
@@ -225,7 +225,7 @@
     // if the parentNode is an AndExpression and the current node is LessThan or GreaterThan
     // then add the node into filterExpressionMap.
     if ((parentNode instanceof AndExpression) && (isLessThanGreaterThanExp(currentNode)
-        && eligibleForRangeExpConv(currentNode))) {
+        && eligibleForRangeExpConversion(currentNode))) {
       addFilterExpressionMap(filterExpressionMap, currentNode, parentNode);
     }
     // In case of Or Exp we have to evaluate both the subtrees of expression separately
@@ -282,7 +282,7 @@
    * @param expChild
    * @return
    */
-  private boolean eligibleForRangeExpConv(Expression expChild) {
+  private boolean eligibleForRangeExpConversion(Expression expChild) {
     for (Expression exp : expChild.getChildren()) {
       if (exp instanceof ColumnExpression) {
         return ((ColumnExpression) exp).isDimension() &&
@@ -393,7 +393,7 @@
   /**
    * This Method Traverses the Expression Tree to find the corresponding node of the Range
    * Expression. If one node of Range Expression is LessThan then a corresponding GreaterThan
-   * will be choosen or vice versa.
+   * will be chosen or vice versa.
    *
    * @param currentNode
    * @param parentNode
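
To make the transformation concrete: a GreaterThan and a LessThan on the same column under an
AND node are folded into a single range predicate. Below is a minimal sketch of the fold, with
hypothetical names (the real code walks the Expression tree and the MAP of candidate nodes
described above):

// AND(c1 > 10, c1 < 20) becomes RANGE(c1, 10, 20), so one executor can
// evaluate the pair instead of two. Illustrative helper only.
final class RangeFoldSketch {
  static String tryFold(String greaterThanColumn, int lowerBound,
      String lessThanColumn, int upperBound) {
    if (!greaterThanColumn.equals(lessThanColumn)) {
      return null;  // different columns: the AND expression is left untouched
    }
    return "RANGE(" + greaterThanColumn + ", " + lowerBound + ", " + upperBound + ")";
  }
}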
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/expression/UnknownExpression.java b/core/src/main/java/org/apache/carbondata/core/scan/expression/UnknownExpression.java
index cbea664..9ca8a94 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/expression/UnknownExpression.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/expression/UnknownExpression.java
@@ -20,14 +20,14 @@
 import java.util.List;
 
 import org.apache.carbondata.core.datastore.block.SegmentProperties;
-import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.core.scan.filter.executer.FilterExecutor;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 
 public abstract class UnknownExpression extends Expression {
 
   public abstract List<ColumnExpression> getAllColumnList();
 
-  public FilterExecuter getFilterExecuter(FilterResolverIntf filterResolverIntf,
+  public FilterExecutor getFilterExecutor(FilterResolverIntf filterResolverIntf,
       SegmentProperties segmentProperties) {
     return null;
   }
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/ConditionalExpression.java b/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/ConditionalExpression.java
index d7b940c..dd67c54 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/ConditionalExpression.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/ConditionalExpression.java
@@ -24,7 +24,7 @@
 
 public interface ConditionalExpression {
 
-  // Will get the column informations involved in the expressions by
+  // Will get the column information involved in the expressions by
   // traversing the tree
   List<ColumnExpression> getColumnList();
 
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/EqualToExpression.java b/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/EqualToExpression.java
index fb5e9e4..fe0d5fd 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/EqualToExpression.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/EqualToExpression.java
@@ -59,7 +59,7 @@
       }
       return elRes;
     }
-    //default implementation if the data types are different for the resultsets
+    //default implementation if the data types are different for the result sets
     if (elRes.getDataType() != erRes.getDataType()) {
       if (elRes.getDataType().getPrecedenceOrder() < erRes.getDataType().getPrecedenceOrder()) {
         val2 = elRes;
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/InExpression.java b/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/InExpression.java
index 390ff28..7a1607c 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/InExpression.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/InExpression.java
@@ -41,16 +41,16 @@
   @Override
   public ExpressionResult evaluate(RowIntf value)
       throws FilterUnsupportedException, FilterIllegalMemberException {
-    ExpressionResult leftRsult = left.evaluate(value);
+    ExpressionResult leftResult = left.evaluate(value);
 
     if (setOfExprResult == null) {
-      ExpressionResult rightRsult = right.evaluate(value);
+      ExpressionResult rightResult = right.evaluate(value);
       ExpressionResult val = null;
       setOfExprResult = new HashSet<ExpressionResult>(10);
-      for (ExpressionResult expressionResVal : rightRsult.getList()) {
-        if (expressionResVal.getDataType().getPrecedenceOrder() < leftRsult.getDataType()
+      for (ExpressionResult expressionResVal : rightResult.getList()) {
+        if (expressionResVal.getDataType().getPrecedenceOrder() < leftResult.getDataType()
             .getPrecedenceOrder()) {
-          val = leftRsult;
+          val = leftResult;
         } else {
           val = expressionResVal;
         }
@@ -88,12 +88,12 @@
     // Left check will cover both the cases when left and right are null, therefore no need
     // for a check on the right result.
     // Example: (null==null) -> Left null return false, (1==null) would automatically be false.
-    if (leftRsult.isNull()) {
-      leftRsult.set(DataTypes.BOOLEAN, false);
+    if (leftResult.isNull()) {
+      leftResult.set(DataTypes.BOOLEAN, false);
     } else {
-      leftRsult.set(DataTypes.BOOLEAN, setOfExprResult.contains(leftRsult));
+      leftResult.set(DataTypes.BOOLEAN, setOfExprResult.contains(leftResult));
     }
-    return leftRsult;
+    return leftResult;
   }
 
   @Override
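
The precedence comparison above decides which side's data type the IN set is built with: values
are normalised to the higher-precedence type so that, for example, an INT literal and a LONG
column value land on the same set entry. A rough sketch of that rule, using a hypothetical
precedence table (CarbonData keeps the real ordering on DataType):

// Mirror of the getPrecedenceOrder() checks above: the side whose type has
// the higher precedence supplies the target type for coercion.
final class PrecedenceSketch {
  static int precedence(String type) {
    switch (type) {
      case "INT":    return 1;
      case "LONG":   return 2;
      case "DOUBLE": return 3;
      default:       return 0;
    }
  }

  static String targetType(String leftType, String rightType) {
    // if the right side has lower precedence, the left type wins, and vice versa
    return precedence(rightType) < precedence(leftType) ? leftType : rightType;
  }
}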
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/NotEqualsExpression.java b/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/NotEqualsExpression.java
index 69c7cc5..f393754 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/NotEqualsExpression.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/NotEqualsExpression.java
@@ -56,7 +56,7 @@
       }
       return elRes;
     }
-    //default implementation if the data types are different for the resultsets
+    //default implementation if the data types are different for the result sets
     if (elRes.getDataType() != erRes.getDataType()) {
       if (elRes.getDataType().getPrecedenceOrder() < erRes.getDataType().getPrecedenceOrder()) {
         val1 = erRes;
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/NotInExpression.java b/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/NotInExpression.java
index 6bd09a5..1ce7e98 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/NotInExpression.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/NotInExpression.java
@@ -51,27 +51,27 @@
       return nullValuePresent;
     }
 
-    ExpressionResult leftRsult = left.evaluate(value);
-    if (leftRsult.isNull()) {
-      leftRsult.set(DataTypes.BOOLEAN, false);
-      return leftRsult;
+    ExpressionResult leftResult = left.evaluate(value);
+    if (leftResult.isNull()) {
+      leftResult.set(DataTypes.BOOLEAN, false);
+      return leftResult;
     }
 
     if (setOfExprResult == null) {
       ExpressionResult val = null;
-      ExpressionResult rightRsult = right.evaluate(value);
+      ExpressionResult rightResult = right.evaluate(value);
       setOfExprResult = new HashSet<ExpressionResult>(10);
-      for (ExpressionResult exprResVal : rightRsult.getList()) {
+      for (ExpressionResult exprResVal : rightResult.getList()) {
 
         if (exprResVal.isNull()) {
           nullValuePresent = new ExpressionResult(DataTypes.BOOLEAN, false);
-          leftRsult.set(DataTypes.BOOLEAN, false);
-          return leftRsult;
+          leftResult.set(DataTypes.BOOLEAN, false);
+          return leftResult;
         }
 
-        if (exprResVal.getDataType().getPrecedenceOrder() < leftRsult.getDataType()
+        if (exprResVal.getDataType().getPrecedenceOrder() < leftResult.getDataType()
             .getPrecedenceOrder()) {
-          val = leftRsult;
+          val = leftResult;
         } else {
           val = exprResVal;
         }
@@ -103,8 +103,8 @@
       }
     }
 
-    leftRsult.set(DataTypes.BOOLEAN, !setOfExprResult.contains(leftRsult));
-    return leftRsult;
+    leftResult.set(DataTypes.BOOLEAN, !setOfExprResult.contains(leftResult));
+    return leftResult;
   }
 
   @Override
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/expression/logical/BinaryLogicalExpression.java b/core/src/main/java/org/apache/carbondata/core/scan/expression/logical/BinaryLogicalExpression.java
index ecbb8d9..06d8150 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/expression/logical/BinaryLogicalExpression.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/expression/logical/BinaryLogicalExpression.java
@@ -48,7 +48,7 @@
     return listOfExp;
   }
 
-  // Will get the column informations involved in the expressions by
+  // Will get the column information involved in the expressions by
   // traversing the tree
   public List<ColumnExpression> getColumnList() {
     // TODO
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExecutorUtil.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExecutorUtil.java
index efd219f..3e21640 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExecutorUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExecutorUtil.java
@@ -27,7 +27,7 @@
 import org.apache.carbondata.core.metadata.datatype.DataTypes;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonMeasure;
 import org.apache.carbondata.core.scan.filter.executer.FilterBitSetUpdater;
-import org.apache.carbondata.core.scan.filter.executer.MeasureColumnExecuterFilterInfo;
+import org.apache.carbondata.core.scan.filter.executer.MeasureColumnExecutorFilterInfo;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
 import org.apache.carbondata.core.util.DataTypeUtil;
 
@@ -50,19 +50,19 @@
    *
    * @param page
    * @param bitSet
-   * @param measureColumnExecuterFilterInfo
+   * @param measureColumnExecutorFilterInfo
    * @param measureColumnResolvedFilterInfo
    * @param filterBitSetUpdater
    */
   public static void executeIncludeExcludeFilterForMeasure(ColumnPage page, BitSet bitSet,
-      MeasureColumnExecuterFilterInfo measureColumnExecuterFilterInfo,
+      MeasureColumnExecutorFilterInfo measureColumnExecutorFilterInfo,
       MeasureColumnResolvedFilterInfo measureColumnResolvedFilterInfo,
       FilterBitSetUpdater filterBitSetUpdater) {
     final CarbonMeasure measure = measureColumnResolvedFilterInfo.getMeasure();
     final DataType dataType = FilterUtil.getMeasureDataType(measureColumnResolvedFilterInfo);
     int numberOfRows = page.getPageSize();
     BitSet nullBitSet = page.getNullBits();
-    Object[] filterKeys = measureColumnExecuterFilterInfo.getFilterKeys();
+    Object[] filterKeys = measureColumnExecutorFilterInfo.getFilterKeys();
     // to handle the null value
     for (int i = 0; i < filterKeys.length; i++) {
       if (filterKeys[i] == null) {
@@ -71,7 +71,7 @@
         }
       }
     }
-    AbstractCollection filterSet = measureColumnExecuterFilterInfo.getFilterSet();
+    AbstractCollection filterSet = measureColumnExecutorFilterInfo.getFilterSet();
     if (dataType == DataTypes.BYTE) {
       ByteOpenHashSet byteOpenHashSet = (ByteOpenHashSet) filterSet;
       for (int i = 0; i < numberOfRows; i++) {
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExpressionProcessor.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExpressionProcessor.java
index e5405ce..0c1ab05 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExpressionProcessor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExpressionProcessor.java
@@ -36,7 +36,7 @@
 import org.apache.carbondata.core.scan.expression.logical.AndExpression;
 import org.apache.carbondata.core.scan.expression.logical.OrExpression;
 import org.apache.carbondata.core.scan.expression.logical.TrueExpression;
-import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.core.scan.filter.executer.FilterExecutor;
 import org.apache.carbondata.core.scan.filter.executer.ImplicitColumnFilterExecutor;
 import org.apache.carbondata.core.scan.filter.intf.ExpressionType;
 import org.apache.carbondata.core.scan.filter.resolver.ConditionalFilterResolverImpl;
@@ -59,26 +59,26 @@
   * filter expression tree which has been passed in the Expression instance.
    *
    * @param expressionTree  , filter expression tree
-   * @param tableIdentifier ,contains carbon store informations
+   * @param tableIdentifier contains carbon store information
    * @return a filter resolver tree
    */
   public FilterResolverIntf getFilterResolver(Expression expressionTree,
       AbsoluteTableIdentifier tableIdentifier) throws FilterUnsupportedException {
     if (null != expressionTree && null != tableIdentifier) {
-      return getFilterResolvertree(expressionTree, tableIdentifier);
+      return getFilterResolverTree(expressionTree, tableIdentifier);
     }
     return null;
   }
 
   /**
    * API will return a filter resolver instance which will be used by
-   * executers to evaluate or execute the filters.
+   * executors to evaluate or execute the filters.
    *
    * @param expressionTree , resolver tree which will hold the resolver tree based on
    *                       filter expression.
    * @return FilterResolverIntf type.
    */
-  private FilterResolverIntf getFilterResolvertree(Expression expressionTree,
+  private FilterResolverIntf getFilterResolverTree(Expression expressionTree,
       AbsoluteTableIdentifier tableIdentifier) throws FilterUnsupportedException {
     FilterResolverIntf filterEvaluatorTree =
         createFilterResolverTree(expressionTree, tableIdentifier);
@@ -311,14 +311,14 @@
     return new RowLevelFilterResolverImpl(expression, false, false, tableIdentifier);
   }
 
-  public static boolean isScanRequired(FilterExecuter filterExecuter, byte[][] maxValue,
+  public static boolean isScanRequired(FilterExecutor filterExecutor, byte[][] maxValue,
       byte[][] minValue, boolean[] isMinMaxSet) {
-    if (filterExecuter instanceof ImplicitColumnFilterExecutor) {
-      return ((ImplicitColumnFilterExecutor) filterExecuter)
+    if (filterExecutor instanceof ImplicitColumnFilterExecutor) {
+      return ((ImplicitColumnFilterExecutor) filterExecutor)
           .isFilterValuesPresentInAbstractIndex(maxValue, minValue, isMinMaxSet);
     } else {
       // otherwise decide based on min/max value
-      BitSet bitSet = filterExecuter.isScanRequired(maxValue, minValue, isMinMaxSet);
+      BitSet bitSet = filterExecutor.isScanRequired(maxValue, minValue, isMinMaxSet);
       return !bitSet.isEmpty();
     }
   }
@@ -353,12 +353,12 @@
   }
 
   /**
-   * Change UnknownReslover to TrueExpression Reslover.
+   * Change UnknownResolver to TrueExpression Resolver.
    *
    * @param tableIdentifier
    * @return
    */
-  public FilterResolverIntf changeUnknownResloverToTrue(AbsoluteTableIdentifier tableIdentifier) {
+  public FilterResolverIntf changeUnknownResolverToTrue(AbsoluteTableIdentifier tableIdentifier) {
     return getFilterResolverBasedOnExpressionType(ExpressionType.TRUE, false,
         new TrueExpression(null), tableIdentifier, new TrueExpression(null));
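
isScanRequired above is the min/max pruning hook: unless an implicit-column executor overrides
the decision, a block can be skipped whenever the filter values fall outside its recorded
min/max range. A simplified sketch of that check for a single long column (the real executors
compare byte[] min/max arrays and return a BitSet):

final class MinMaxPruneSketch {
  // Scan only when the statistics cannot rule the value out.
  static boolean isScanRequired(long filterValue, long blockMin, long blockMax,
      boolean isMinMaxSet) {
    if (!isMinMaxSet) {
      return true;  // no statistics recorded: must scan to stay correct
    }
    return filterValue >= blockMin && filterValue <= blockMax;
  }
}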
 
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterProcessor.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterProcessor.java
index 9ef34d9..f6915d7 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterProcessor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterProcessor.java
@@ -29,7 +29,7 @@
   * expression tree which has been passed.
    *
    * @param expressionTree  , filter expression tree
-   * @param tableIdentifier ,contains carbon store informations.
+   * @param tableIdentifier contains carbon store information.
    * @return
    * @throws FilterUnsupportedException
    */
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
index 6f121ca..3053d91 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
@@ -56,23 +56,23 @@
 import org.apache.carbondata.core.scan.expression.logical.AndExpression;
 import org.apache.carbondata.core.scan.expression.logical.OrExpression;
 import org.apache.carbondata.core.scan.expression.logical.TrueExpression;
-import org.apache.carbondata.core.scan.filter.executer.AndFilterExecuterImpl;
-import org.apache.carbondata.core.scan.filter.executer.DimColumnExecuterFilterInfo;
-import org.apache.carbondata.core.scan.filter.executer.ExcludeFilterExecuterImpl;
+import org.apache.carbondata.core.scan.filter.executer.AndFilterExecutorImpl;
+import org.apache.carbondata.core.scan.filter.executer.DimColumnExecutorFilterInfo;
+import org.apache.carbondata.core.scan.filter.executer.ExcludeFilterExecutorImpl;
 import org.apache.carbondata.core.scan.filter.executer.FalseFilterExecutor;
-import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.core.scan.filter.executer.FilterExecutor;
 import org.apache.carbondata.core.scan.filter.executer.ImplicitIncludeFilterExecutorImpl;
-import org.apache.carbondata.core.scan.filter.executer.IncludeFilterExecuterImpl;
-import org.apache.carbondata.core.scan.filter.executer.MeasureColumnExecuterFilterInfo;
-import org.apache.carbondata.core.scan.filter.executer.OrFilterExecuterImpl;
-import org.apache.carbondata.core.scan.filter.executer.RangeValueFilterExecuterImpl;
+import org.apache.carbondata.core.scan.filter.executer.IncludeFilterExecutorImpl;
+import org.apache.carbondata.core.scan.filter.executer.MeasureColumnExecutorFilterInfo;
+import org.apache.carbondata.core.scan.filter.executer.OrFilterExecutorImpl;
+import org.apache.carbondata.core.scan.filter.executer.RangeValueFilterExecutorImpl;
 import org.apache.carbondata.core.scan.filter.executer.RestructureExcludeFilterExecutorImpl;
 import org.apache.carbondata.core.scan.filter.executer.RestructureIncludeFilterExecutorImpl;
-import org.apache.carbondata.core.scan.filter.executer.RowLevelFilterExecuterImpl;
-import org.apache.carbondata.core.scan.filter.executer.RowLevelRangeTypeExecuterFactory;
+import org.apache.carbondata.core.scan.filter.executer.RowLevelFilterExecutorImpl;
+import org.apache.carbondata.core.scan.filter.executer.RowLevelRangeTypeExecutorFactory;
 import org.apache.carbondata.core.scan.filter.executer.TrueFilterExecutor;
 import org.apache.carbondata.core.scan.filter.intf.ExpressionType;
-import org.apache.carbondata.core.scan.filter.intf.FilterExecuterType;
+import org.apache.carbondata.core.scan.filter.intf.FilterExecutorType;
 import org.apache.carbondata.core.scan.filter.intf.RowImpl;
 import org.apache.carbondata.core.scan.filter.intf.RowIntf;
 import org.apache.carbondata.core.scan.filter.resolver.ConditionalFilterResolverImpl;
@@ -104,8 +104,8 @@
 
   /**
    * Pattern used : Visitor Pattern
-   * Method will create filter executer tree based on the filter resolved tree,
-   * in this algorithm based on the resolver instance the executers will be visited
+   * Method will create filter executor tree based on the filter resolved tree,
+   * in this algorithm based on the resolver instance the executors will be visited
    * and the resolved surrogates will be converted to keys
    *
    * @param filterExpressionResolverTree
@@ -113,22 +113,22 @@
    * @param complexDimensionInfoMap
    * @param minMaxCacheColumns
   * @param isStreamDataFile: whether to create filter executor tree for stream data files
-   * @return FilterExecuter instance
+   * @return FilterExecutor instance
    *
    */
-  private static FilterExecuter createFilterExecuterTree(
+  private static FilterExecutor createFilterExecutorTree(
       FilterResolverIntf filterExpressionResolverTree, SegmentProperties segmentProperties,
       Map<Integer, GenericQueryType> complexDimensionInfoMap,
       List<CarbonColumn> minMaxCacheColumns, boolean isStreamDataFile) {
-    FilterExecuterType filterExecuterType = filterExpressionResolverTree.getFilterExecuterType();
-    if (null != filterExecuterType) {
-      switch (filterExecuterType) {
+    FilterExecutorType filterExecutorType = filterExpressionResolverTree.getFilterExecutorType();
+    if (null != filterExecutorType) {
+      switch (filterExecutorType) {
         case INCLUDE:
           if (null != filterExpressionResolverTree.getDimColResolvedFilterInfo()
               && null != filterExpressionResolverTree.getDimColResolvedFilterInfo()
               .getFilterValues() && filterExpressionResolverTree.getDimColResolvedFilterInfo()
               .getFilterValues().isOptimized()) {
-            return getExcludeFilterExecuter(
+            return getExcludeFilterExecutor(
                 filterExpressionResolverTree.getDimColResolvedFilterInfo(),
                 filterExpressionResolverTree.getMsrColResolvedFilterInfo(), segmentProperties);
           }
@@ -137,24 +137,24 @@
               segmentProperties, minMaxCacheColumns, isStreamDataFile)) {
             return new TrueFilterExecutor();
           }
-          return getIncludeFilterExecuter(
+          return getIncludeFilterExecutor(
               filterExpressionResolverTree.getDimColResolvedFilterInfo(),
               filterExpressionResolverTree.getMsrColResolvedFilterInfo(), segmentProperties);
         case EXCLUDE:
-          return getExcludeFilterExecuter(
+          return getExcludeFilterExecutor(
               filterExpressionResolverTree.getDimColResolvedFilterInfo(),
               filterExpressionResolverTree.getMsrColResolvedFilterInfo(), segmentProperties);
         case OR:
-          return new OrFilterExecuterImpl(
-              createFilterExecuterTree(filterExpressionResolverTree.getLeft(), segmentProperties,
+          return new OrFilterExecutorImpl(
+              createFilterExecutorTree(filterExpressionResolverTree.getLeft(), segmentProperties,
                   complexDimensionInfoMap, minMaxCacheColumns, isStreamDataFile),
-              createFilterExecuterTree(filterExpressionResolverTree.getRight(), segmentProperties,
+              createFilterExecutorTree(filterExpressionResolverTree.getRight(), segmentProperties,
                   complexDimensionInfoMap, minMaxCacheColumns, isStreamDataFile));
         case AND:
-          return new AndFilterExecuterImpl(
-              createFilterExecuterTree(filterExpressionResolverTree.getLeft(), segmentProperties,
+          return new AndFilterExecutorImpl(
+              createFilterExecutorTree(filterExpressionResolverTree.getLeft(), segmentProperties,
                   complexDimensionInfoMap, minMaxCacheColumns, isStreamDataFile),
-              createFilterExecuterTree(filterExpressionResolverTree.getRight(), segmentProperties,
+              createFilterExecutorTree(filterExpressionResolverTree.getRight(), segmentProperties,
                   complexDimensionInfoMap, minMaxCacheColumns, isStreamDataFile));
         case ROWLEVEL_LESSTHAN:
         case ROWLEVEL_LESSTHAN_EQUALTO:
@@ -165,12 +165,12 @@
               (RowLevelRangeFilterResolverImpl) filterExpressionResolverTree;
           if (checkIfCurrentNodeToBeReplacedWithTrueFilterExpression(
               rowLevelRangeFilterResolver.getDimColEvaluatorInfoList(),
-              rowLevelRangeFilterResolver.getMsrColEvalutorInfoList(), segmentProperties,
+              rowLevelRangeFilterResolver.getMsrColEvaluatorInfoList(), segmentProperties,
               minMaxCacheColumns, isStreamDataFile)) {
             return new TrueFilterExecutor();
           }
-          return RowLevelRangeTypeExecuterFactory
-              .getRowLevelRangeTypeExecuter(filterExecuterType, filterExpressionResolverTree,
+          return RowLevelRangeTypeExecutorFactory
+              .getRowLevelRangeTypeExecutor(filterExecutorType, filterExpressionResolverTree,
                   segmentProperties);
         case RANGE:
           // return true filter expression if filter column min/max is not cached in driver
@@ -178,7 +178,7 @@
               segmentProperties, minMaxCacheColumns, isStreamDataFile)) {
             return new TrueFilterExecutor();
           }
-          return new RangeValueFilterExecuterImpl(
+          return new RangeValueFilterExecutorImpl(
               filterExpressionResolverTree.getDimColResolvedFilterInfo(),
               filterExpressionResolverTree.getFilterExpression(),
               ((ConditionalFilterResolverImpl) filterExpressionResolverTree)
@@ -190,14 +190,14 @@
         case ROWLEVEL:
         default:
           if (filterExpressionResolverTree.getFilterExpression() instanceof UnknownExpression) {
-            FilterExecuter filterExecuter =
+            FilterExecutor filterExecutor =
                 ((UnknownExpression) filterExpressionResolverTree.getFilterExpression())
-                    .getFilterExecuter(filterExpressionResolverTree, segmentProperties);
-            if (filterExecuter != null) {
-              return filterExecuter;
+                    .getFilterExecutor(filterExpressionResolverTree, segmentProperties);
+            if (filterExecutor != null) {
+              return filterExecutor;
             }
           }
-          return new RowLevelFilterExecuterImpl(
+          return new RowLevelFilterExecutorImpl(
               ((RowLevelFilterResolverImpl) filterExpressionResolverTree)
                   .getDimColEvaluatorInfoList(),
               ((RowLevelFilterResolverImpl) filterExpressionResolverTree)
@@ -208,7 +208,7 @@
 
       }
     }
-    return new RowLevelFilterExecuterImpl(
+    return new RowLevelFilterExecutorImpl(
         ((RowLevelFilterResolverImpl) filterExpressionResolverTree).getDimColEvaluatorInfoList(),
         ((RowLevelFilterResolverImpl) filterExpressionResolverTree).getMsrColEvalutorInfoList(),
         ((RowLevelFilterResolverImpl) filterExpressionResolverTree).getFilterExpresion(),
@@ -218,13 +218,13 @@
   }
 
   /**
-   * It gives filter executer based on columnar or column group
+   * It gives filter executor based on columnar or column group
    *
    * @param dimColResolvedFilterInfo
    * @param segmentProperties
    * @return
    */
-  private static FilterExecuter getIncludeFilterExecuter(
+  private static FilterExecutor getIncludeFilterExecutor(
       DimColumnResolvedFilterInfo dimColResolvedFilterInfo,
       MeasureColumnResolvedFilterInfo msrColResolvedFilterInfo,
       SegmentProperties segmentProperties) {
@@ -238,7 +238,7 @@
         msrColResolvedFilterInfoCopyObject.setMeasure(measuresFromCurrentBlock);
         msrColResolvedFilterInfoCopyObject.setColumnIndex(measuresFromCurrentBlock.getOrdinal());
         msrColResolvedFilterInfoCopyObject.setType(measuresFromCurrentBlock.getDataType());
-        return new IncludeFilterExecuterImpl(null, msrColResolvedFilterInfoCopyObject,
+        return new IncludeFilterExecutorImpl(null, msrColResolvedFilterInfoCopyObject,
             segmentProperties, true);
       } else {
         return new RestructureIncludeFilterExecutorImpl(dimColResolvedFilterInfo,
@@ -257,7 +257,7 @@
             dimColResolvedFilterInfo.getCopyObject();
         dimColResolvedFilterInfoCopyObject.setDimension(dimensionFromCurrentBlock);
         dimColResolvedFilterInfoCopyObject.setColumnIndex(dimensionFromCurrentBlock.getOrdinal());
-        return new IncludeFilterExecuterImpl(dimColResolvedFilterInfoCopyObject, null,
+        return new IncludeFilterExecutorImpl(dimColResolvedFilterInfoCopyObject, null,
             segmentProperties, false);
       } else {
         return new RestructureIncludeFilterExecutorImpl(dimColResolvedFilterInfo,
@@ -409,13 +409,13 @@
   }
 
   /**
-   * It gives filter executer based on columnar or column group
+   * It gives filter executor based on columnar or column group
    *
    * @param dimColResolvedFilterInfo
    * @param segmentProperties
    * @return
    */
-  private static FilterExecuter getExcludeFilterExecuter(
+  private static FilterExecutor getExcludeFilterExecutor(
       DimColumnResolvedFilterInfo dimColResolvedFilterInfo,
       MeasureColumnResolvedFilterInfo msrColResolvedFilterInfo,
       SegmentProperties segmentProperties) {
@@ -430,7 +430,7 @@
         msrColResolvedFilterInfoCopyObject.setMeasure(measuresFromCurrentBlock);
         msrColResolvedFilterInfoCopyObject.setColumnIndex(measuresFromCurrentBlock.getOrdinal());
         msrColResolvedFilterInfoCopyObject.setType(measuresFromCurrentBlock.getDataType());
-        return new ExcludeFilterExecuterImpl(null, msrColResolvedFilterInfoCopyObject,
+        return new ExcludeFilterExecutorImpl(null, msrColResolvedFilterInfoCopyObject,
             segmentProperties, true);
       } else {
         return new RestructureExcludeFilterExecutorImpl(dimColResolvedFilterInfo,
@@ -445,7 +445,7 @@
           dimColResolvedFilterInfo.getCopyObject();
       dimColResolvedFilterInfoCopyObject.setDimension(dimensionFromCurrentBlock);
       dimColResolvedFilterInfoCopyObject.setColumnIndex(dimensionFromCurrentBlock.getOrdinal());
-      return new ExcludeFilterExecuterImpl(dimColResolvedFilterInfoCopyObject, null,
+      return new ExcludeFilterExecutorImpl(dimColResolvedFilterInfoCopyObject, null,
           segmentProperties, false);
     } else {
       return new RestructureExcludeFilterExecutorImpl(dimColResolvedFilterInfo,
@@ -567,7 +567,7 @@
       throw new FilterUnsupportedException("Unsupported Filter condition: " + result, ex);
     }
 
-    java.util.Comparator<byte[]> filterNoDictValueComaparator = new java.util.Comparator<byte[]>() {
+    java.util.Comparator<byte[]> filterNoDictValueComparator = new java.util.Comparator<byte[]>() {
 
       @Override
       public int compare(byte[] filterMember1, byte[] filterMember2) {
@@ -576,7 +576,7 @@
       }
 
     };
-    Collections.sort(filterValuesList, filterNoDictValueComaparator);
+    Collections.sort(filterValuesList, filterNoDictValueComparator);
     ColumnFilterInfo columnFilterInfo = null;
     if (filterValuesList.size() > 0) {
       columnFilterInfo = new ColumnFilterInfo();
@@ -618,9 +618,9 @@
       throw new FilterUnsupportedException("Unsupported Filter condition: " + result, ex);
     }
 
-    SerializableComparator filterMeasureComaparator =
+    SerializableComparator filterMeasureComparator =
         Comparator.getComparatorByDataTypeForMeasure(dataType);
-    Collections.sort(filterValuesList, filterMeasureComaparator);
+    Collections.sort(filterValuesList, filterMeasureComparator);
     ColumnFilterInfo columnFilterInfo = null;
     if (filterValuesList.size() > 0) {
       columnFilterInfo = new ColumnFilterInfo();
@@ -665,14 +665,14 @@
       boolean isExclude, int[] keys, List<byte[]> filterValuesList,
       int keyOrdinalOfDimensionFromCurrentBlock) {
     if (null != columnFilterInfo) {
-      List<Integer> listOfsurrogates = null;
+      List<Integer> listOfSurrogates = null;
       if (!isExclude && columnFilterInfo.isIncludeFilter()) {
-        listOfsurrogates = columnFilterInfo.getFilterList();
+        listOfSurrogates = columnFilterInfo.getFilterList();
       } else if (isExclude || !columnFilterInfo.isIncludeFilter()) {
-        listOfsurrogates = columnFilterInfo.getExcludeFilterList();
+        listOfSurrogates = columnFilterInfo.getExcludeFilterList();
       }
-      if (null != listOfsurrogates) {
-        for (Integer surrogate : listOfsurrogates) {
+      if (null != listOfSurrogates) {
+        for (Integer surrogate : listOfSurrogates) {
           keys[keyOrdinalOfDimensionFromCurrentBlock] = surrogate;
           filterValuesList.add(ByteUtil.convertIntToBytes(surrogate));
         }
@@ -686,14 +686,14 @@
   private static byte[][] getFilterValueInBytesForDictRange(ColumnFilterInfo columnFilterInfo,
       int[] keys, List<byte[]> filterValuesList, int keyOrdinalOfDimensionFromCurrentBlock) {
     if (null != columnFilterInfo) {
-      List<Integer> listOfsurrogates = columnFilterInfo.getFilterList();
-      if (listOfsurrogates == null || listOfsurrogates.size() > 1) {
+      List<Integer> listOfSurrogates = columnFilterInfo.getFilterList();
+      if (listOfSurrogates == null || listOfSurrogates.size() > 1) {
         throw new RuntimeException(
             "Filter values cannot be null in case of range in dictionary include");
       }
       // Here we only get the first column as there can be only one range column.
-      keys[keyOrdinalOfDimensionFromCurrentBlock] = listOfsurrogates.get(0);
-      filterValuesList.add(ByteUtil.convertIntToBytes(listOfsurrogates.get(0)));
+      keys[keyOrdinalOfDimensionFromCurrentBlock] = listOfSurrogates.get(0);
+      filterValuesList.add(ByteUtil.convertIntToBytes(listOfSurrogates.get(0)));
     }
     return filterValuesList.toArray(new byte[filterValuesList.size()][]);
   }
@@ -723,7 +723,7 @@
 
   /**
    * Below method will be used to convert the filter surrogate keys
-   * to mdkey
+   * to MDKey
    *
    * @param columnFilterInfo
    * @param carbonDimension
@@ -753,31 +753,31 @@
   }
 
   /**
-   * API will create an filter executer tree based on the filter resolver
+   * API will create a filter executor tree based on the filter resolver
    *
    * @param filterExpressionResolverTree
    * @param segmentProperties
    * @return
    */
-  public static FilterExecuter getFilterExecuterTree(
+  public static FilterExecutor getFilterExecutorTree(
       FilterResolverIntf filterExpressionResolverTree, SegmentProperties segmentProperties,
       Map<Integer, GenericQueryType> complexDimensionInfoMap, boolean isStreamDataFile) {
-    return getFilterExecuterTree(filterExpressionResolverTree, segmentProperties,
+    return getFilterExecutorTree(filterExpressionResolverTree, segmentProperties,
         complexDimensionInfoMap, null, isStreamDataFile);
   }
 
   /**
-   * API will create an filter executer tree based on the filter resolver and minMaxColumns
+   * API will create a filter executor tree based on the filter resolver and minMaxColumns
    *
    * @param filterExpressionResolverTree
    * @param segmentProperties
    * @return
    */
-  public static FilterExecuter getFilterExecuterTree(
+  public static FilterExecutor getFilterExecutorTree(
       FilterResolverIntf filterExpressionResolverTree, SegmentProperties segmentProperties,
       Map<Integer, GenericQueryType> complexDimensionInfoMap,
       List<CarbonColumn> minMaxCacheColumns, boolean isStreamDataFile) {
-    return createFilterExecuterTree(filterExpressionResolverTree, segmentProperties,
+    return createFilterExecutorTree(filterExpressionResolverTree, segmentProperties,
         complexDimensionInfoMap, minMaxCacheColumns, isStreamDataFile);
   }
 
@@ -787,12 +787,12 @@
    * @param filterValues
    * @param segmentProperties
    * @param dimension
-   * @param dimColumnExecuterInfo
+   * @param dimColumnExecutorInfo
    */
   public static void prepareKeysFromSurrogates(ColumnFilterInfo filterValues,
       SegmentProperties segmentProperties, CarbonDimension dimension,
-      DimColumnExecuterFilterInfo dimColumnExecuterInfo, CarbonMeasure measures,
-      MeasureColumnExecuterFilterInfo msrColumnExecuterInfo) {
+      DimColumnExecutorFilterInfo dimColumnExecutorInfo, CarbonMeasure measures,
+      MeasureColumnExecutorFilterInfo msrColumnExecutorInfo) {
     if (null != measures) {
       DataType filterColumnDataType = DataTypes.valueOf(measures.getDataType().getId());
       DataTypeConverterImpl converter = new DataTypeConverterImpl();
@@ -805,18 +805,18 @@
                   converter);
         }
       }
-      msrColumnExecuterInfo.setFilterKeys(keysBasedOnFilter, filterColumnDataType);
+      msrColumnExecutorInfo.setFilterKeys(keysBasedOnFilter, filterColumnDataType);
     } else {
       if (filterValues == null) {
-        dimColumnExecuterInfo.setFilterKeys(new byte[0][]);
+        dimColumnExecutorInfo.setFilterKeys(new byte[0][]);
       } else {
         byte[][] keysBasedOnFilter =
             getKeyArray(filterValues, dimension, segmentProperties, false, false);
         if (!filterValues.isIncludeFilter() || filterValues.isOptimized()) {
-          dimColumnExecuterInfo.setExcludeFilterKeys(
+          dimColumnExecutorInfo.setExcludeFilterKeys(
               getKeyArray(filterValues, dimension, segmentProperties, true, false));
         }
-        dimColumnExecuterInfo.setFilterKeys(keysBasedOnFilter);
+        dimColumnExecutorInfo.setFilterKeys(keysBasedOnFilter);
       }
     }
   }
@@ -961,7 +961,7 @@
     }
   }
 
-  public static void updateIndexOfColumnExpression(Expression exp, int dimOridnalMax) {
+  public static void updateIndexOfColumnExpression(Expression exp, int dimOrdinalMax) {
     // if expression is null, not require to update index.
     if (exp == null) {
       return;
@@ -973,14 +973,14 @@
         if (column.isDimension()) {
           ce.setColIndex(column.getOrdinal());
         } else {
-          ce.setColIndex(dimOridnalMax + column.getOrdinal());
+          ce.setColIndex(dimOrdinalMax + column.getOrdinal());
         }
       }
     } else {
       if (exp.getChildren().size() > 0) {
         List<Expression> children = exp.getChildren();
         for (int i = 0; i < children.size(); i++) {
-          updateIndexOfColumnExpression(children.get(i), dimOridnalMax);
+          updateIndexOfColumnExpression(children.get(i), dimOrdinalMax);
         }
       }
     }
@@ -1044,7 +1044,7 @@
   }
 
   /**
-   * This methdd will check if ImplictFilter is present or not
+   * This method will check if ImplicitFilter is present or not
    * if it is present then return that ImplicitFilterExpression
    *
    * @param expression
@@ -1062,7 +1062,7 @@
           if (childExpression instanceof ColumnExpression && ((ColumnExpression) childExpression)
               .getColumnName().equalsIgnoreCase(CarbonCommonConstants.POSITION_ID)) {
             // Remove the right expression node and point the expression to left node expression
-            // if 1st children is implict column positionID then 2nd children will be
+            // if 1st child is implicit column positionID then 2nd child will be
             // implicit filter list
             return children.get(1);
           }
@@ -1130,13 +1130,13 @@
    * @return sorted encoded filter values
    */
   private static byte[][] getSortedEncodedFilters(List<byte[]> encodedFilters) {
-    java.util.Comparator<byte[]> filterNoDictValueComaparator = new java.util.Comparator<byte[]>() {
+    java.util.Comparator<byte[]> filterNoDictValueComparator = new java.util.Comparator<byte[]>() {
       @Override
       public int compare(byte[] filterMember1, byte[] filterMember2) {
         return ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterMember1, filterMember2);
       }
     };
-    Collections.sort(encodedFilters, filterNoDictValueComaparator);
+    Collections.sort(encodedFilters, filterNoDictValueComparator);
     return encodedFilters.toArray(new byte[encodedFilters.size()][]);
   }
 
@@ -1165,9 +1165,9 @@
             Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
         row.setValues(new Object[] { DataTypeUtil.getDataBasedOnDataType(stringValue,
             columnExpression.getCarbonColumn().getDataType()) });
-        Boolean rslt = expression.evaluate(row).getBoolean();
-        if (null != rslt) {
-          if (rslt) {
+        Boolean result = expression.evaluate(row).getBoolean();
+        if (null != result) {
+          if (result) {
             includeFilterBitSet.set(i);
           }
         }
@@ -1217,7 +1217,7 @@
 
   /**
    * Below method will be used to get filter executor instance for range filters
-   * when local dictonary is present for in blocklet
+   * when local dictionary is present in the blocklet
    * @param rawColumnChunk
    * raw column chunk
    * @param exp
@@ -1226,7 +1226,7 @@
    * is data was already sorted
    * @return
    */
-  public static FilterExecuter getFilterExecutorForRangeFilters(
+  public static FilterExecutor getFilterExecutorForRangeFilters(
       DimensionRawColumnChunk rawColumnChunk, Expression exp, boolean isNaturalSorted) {
     BitSet includeDictionaryValues;
     try {
@@ -1241,13 +1241,13 @@
     byte[][] encodedFilterValues = FilterUtil
         .getEncodedFilterValuesForRange(includeDictionaryValues,
             rawColumnChunk.getLocalDictionary(), isExclude);
-    FilterExecuter filterExecuter;
+    FilterExecutor filterExecutor;
     if (!isExclude) {
-      filterExecuter = new IncludeFilterExecuterImpl(encodedFilterValues, isNaturalSorted);
+      filterExecutor = new IncludeFilterExecutorImpl(encodedFilterValues, isNaturalSorted);
     } else {
-      filterExecuter = new ExcludeFilterExecuterImpl(encodedFilterValues, isNaturalSorted);
+      filterExecutor = new ExcludeFilterExecutorImpl(encodedFilterValues, isNaturalSorted);
     }
-    return filterExecuter;
+    return filterExecutor;
   }
 
   /**
@@ -1268,7 +1268,7 @@
       Object value =
           DataTypeUtil.getDataBasedOnDataTypeForNoDictionaryColumn(minMaxBytes, dataType);
       // filter value should be in range of max and min value i.e
-      // max>filtervalue>min
+      // max>filterValue>min
       // so filter-max should be negative
       Object data = DataTypeUtil.getDataBasedOnDataTypeForNoDictionaryColumn(filterValue, dataType);
       SerializableComparator comparator = Comparator.getComparator(dataType);
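
The comparator renames above leave the underlying idea implicit: no-dictionary filter values are raw byte arrays and must be sorted with an unsigned lexicographic comparison so later binary searches against sorted column pages behave correctly. A minimal, self-contained sketch using only the JDK (this mirrors, rather than reuses, ByteUtil.UnsafeComparer):

    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;

    public class SortEncodedFiltersSketch {
      // Unsigned lexicographic comparison, equivalent in spirit to
      // ByteUtil.UnsafeComparer.INSTANCE.compareTo(a, b).
      static final Comparator<byte[]> UNSIGNED_LEXICOGRAPHIC = (a, b) -> {
        int minLen = Math.min(a.length, b.length);
        for (int i = 0; i < minLen; i++) {
          int cmp = (a[i] & 0xFF) - (b[i] & 0xFF);   // unsigned byte compare
          if (cmp != 0) {
            return cmp;
          }
        }
        return a.length - b.length;                  // shorter array sorts first
      };

      public static void main(String[] args) {
        List<byte[]> encodedFilters = Arrays.asList(
            new byte[] {(byte) 0x80}, new byte[] {0x01}, new byte[] {0x01, 0x02});
        encodedFilters.sort(UNSIGNED_LEXICOGRAPHIC);
        // 0x01 < 0x01 0x02 < 0x80 (0x80 is 128 unsigned, not -128)
        encodedFilters.forEach(f -> System.out.println(Arrays.toString(f)));
      }
    }
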
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/AndFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/AndFilterExecutorImpl.java
similarity index 72%
rename from core/src/main/java/org/apache/carbondata/core/scan/filter/executer/AndFilterExecuterImpl.java
rename to core/src/main/java/org/apache/carbondata/core/scan/filter/executer/AndFilterExecutorImpl.java
index bbacc71..cca4231 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/AndFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/AndFilterExecutorImpl.java
@@ -25,24 +25,24 @@
 import org.apache.carbondata.core.scan.processor.RawBlockletColumnChunks;
 import org.apache.carbondata.core.util.BitSetGroup;
 
-public class AndFilterExecuterImpl implements FilterExecuter, ImplicitColumnFilterExecutor {
+public class AndFilterExecutorImpl implements FilterExecutor, ImplicitColumnFilterExecutor {
 
-  private FilterExecuter leftExecuter;
-  private FilterExecuter rightExecuter;
+  private FilterExecutor leftExecutor;
+  private FilterExecutor rightExecutor;
 
-  public AndFilterExecuterImpl(FilterExecuter leftExecuter, FilterExecuter rightExecuter) {
-    this.leftExecuter = leftExecuter;
-    this.rightExecuter = rightExecuter;
+  public AndFilterExecutorImpl(FilterExecutor leftExecutor, FilterExecutor rightExecutor) {
+    this.leftExecutor = leftExecutor;
+    this.rightExecutor = rightExecutor;
   }
 
   @Override
   public BitSetGroup applyFilter(RawBlockletColumnChunks rawBlockletColumnChunks,
       boolean useBitsetPipeLine) throws FilterUnsupportedException, IOException {
-    BitSetGroup leftFilters = leftExecuter.applyFilter(rawBlockletColumnChunks, useBitsetPipeLine);
+    BitSetGroup leftFilters = leftExecutor.applyFilter(rawBlockletColumnChunks, useBitsetPipeLine);
     if (leftFilters.isEmpty()) {
       return leftFilters;
     }
-    BitSetGroup rightFilter = rightExecuter.applyFilter(rawBlockletColumnChunks, useBitsetPipeLine);
+    BitSetGroup rightFilter = rightExecutor.applyFilter(rawBlockletColumnChunks, useBitsetPipeLine);
     if (rightFilter.isEmpty()) {
       return rightFilter;
     }
@@ -54,11 +54,11 @@
   @Override
   public BitSet prunePages(RawBlockletColumnChunks rawBlockletColumnChunks)
       throws FilterUnsupportedException, IOException {
-    BitSet leftFilters = leftExecuter.prunePages(rawBlockletColumnChunks);
+    BitSet leftFilters = leftExecutor.prunePages(rawBlockletColumnChunks);
     if (leftFilters.isEmpty()) {
       return leftFilters;
     }
-    BitSet rightFilter = rightExecuter.prunePages(rawBlockletColumnChunks);
+    BitSet rightFilter = rightExecutor.prunePages(rawBlockletColumnChunks);
     if (rightFilter.isEmpty()) {
       return rightFilter;
     }
@@ -69,18 +69,18 @@
   @Override
   public boolean applyFilter(RowIntf value, int dimOrdinalMax)
       throws FilterUnsupportedException, IOException {
-    return leftExecuter.applyFilter(value, dimOrdinalMax) &&
-        rightExecuter.applyFilter(value, dimOrdinalMax);
+    return leftExecutor.applyFilter(value, dimOrdinalMax) &&
+        rightExecutor.applyFilter(value, dimOrdinalMax);
   }
 
   @Override
   public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue,
       boolean[] isMinMaxSet) {
-    BitSet leftFilters = leftExecuter.isScanRequired(blockMaxValue, blockMinValue, isMinMaxSet);
+    BitSet leftFilters = leftExecutor.isScanRequired(blockMaxValue, blockMinValue, isMinMaxSet);
     if (leftFilters.isEmpty()) {
       return leftFilters;
     }
-    BitSet rightFilter = rightExecuter.isScanRequired(blockMaxValue, blockMinValue, isMinMaxSet);
+    BitSet rightFilter = rightExecutor.isScanRequired(blockMaxValue, blockMinValue, isMinMaxSet);
     if (rightFilter.isEmpty()) {
       return rightFilter;
     }
@@ -90,30 +90,30 @@
 
   @Override
   public void readColumnChunks(RawBlockletColumnChunks rawBlockletColumnChunks) throws IOException {
-    leftExecuter.readColumnChunks(rawBlockletColumnChunks);
-    rightExecuter.readColumnChunks(rawBlockletColumnChunks);
+    leftExecutor.readColumnChunks(rawBlockletColumnChunks);
+    rightExecutor.readColumnChunks(rawBlockletColumnChunks);
   }
 
   @Override
   public BitSet isFilterValuesPresentInBlockOrBlocklet(byte[][] maxValue, byte[][] minValue,
       String uniqueBlockPath, boolean[] isMinMaxSet) {
     BitSet leftFilters = null;
-    if (leftExecuter instanceof ImplicitColumnFilterExecutor) {
-      leftFilters = ((ImplicitColumnFilterExecutor) leftExecuter)
+    if (leftExecutor instanceof ImplicitColumnFilterExecutor) {
+      leftFilters = ((ImplicitColumnFilterExecutor) leftExecutor)
           .isFilterValuesPresentInBlockOrBlocklet(maxValue, minValue, uniqueBlockPath, isMinMaxSet);
     } else {
-      leftFilters = leftExecuter
+      leftFilters = leftExecutor
           .isScanRequired(maxValue, minValue, isMinMaxSet);
     }
     if (leftFilters.isEmpty()) {
       return leftFilters;
     }
     BitSet rightFilter = null;
-    if (rightExecuter instanceof ImplicitColumnFilterExecutor) {
-      rightFilter = ((ImplicitColumnFilterExecutor) rightExecuter)
+    if (rightExecutor instanceof ImplicitColumnFilterExecutor) {
+      rightFilter = ((ImplicitColumnFilterExecutor) rightExecutor)
           .isFilterValuesPresentInBlockOrBlocklet(maxValue, minValue, uniqueBlockPath, isMinMaxSet);
     } else {
-      rightFilter = rightExecuter.isScanRequired(maxValue, minValue, isMinMaxSet);
+      rightFilter = rightExecutor.isScanRequired(maxValue, minValue, isMinMaxSet);
     }
     if (rightFilter.isEmpty()) {
       return rightFilter;
@@ -127,11 +127,11 @@
       boolean[] isMinMaxSet) {
     Boolean leftRes;
     BitSet tempFilter;
-    if (leftExecuter instanceof ImplicitColumnFilterExecutor) {
-      leftRes = ((ImplicitColumnFilterExecutor) leftExecuter)
+    if (leftExecutor instanceof ImplicitColumnFilterExecutor) {
+      leftRes = ((ImplicitColumnFilterExecutor) leftExecutor)
           .isFilterValuesPresentInAbstractIndex(maxValue, minValue, isMinMaxSet);
     } else {
-      tempFilter = leftExecuter
+      tempFilter = leftExecutor
           .isScanRequired(maxValue, minValue, isMinMaxSet);
       leftRes = !tempFilter.isEmpty();
     }
@@ -140,11 +140,11 @@
     }
 
     Boolean rightRes = null;
-    if (rightExecuter instanceof ImplicitColumnFilterExecutor) {
-      rightRes = ((ImplicitColumnFilterExecutor) rightExecuter)
+    if (rightExecutor instanceof ImplicitColumnFilterExecutor) {
+      rightRes = ((ImplicitColumnFilterExecutor) rightExecutor)
           .isFilterValuesPresentInAbstractIndex(maxValue, minValue, isMinMaxSet);
     } else {
-      tempFilter = rightExecuter
+      tempFilter = rightExecutor
           .isScanRequired(maxValue, minValue, isMinMaxSet);
       rightRes = !tempFilter.isEmpty();
     }
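
The renamed AndFilterExecutorImpl keeps its short-circuit shape: the right subtree is only evaluated when the left subtree selected at least one row, and the two results are intersected. A minimal sketch of that control flow with plain java.util.BitSet (the real code works on a per-page BitSetGroup):

    import java.util.BitSet;
    import java.util.function.Supplier;

    public class AndShortCircuitSketch {
      // Applies left first; skips right entirely when left selects nothing.
      static BitSet andFilter(Supplier<BitSet> left, Supplier<BitSet> right) {
        BitSet leftFilters = left.get();
        if (leftFilters.isEmpty()) {
          return leftFilters;          // short circuit: no row can survive an AND
        }
        BitSet rightFilter = right.get();
        if (rightFilter.isEmpty()) {
          return rightFilter;
        }
        leftFilters.and(rightFilter);  // intersection of surviving rows
        return leftFilters;
      }

      public static void main(String[] args) {
        BitSet a = new BitSet(); a.set(1); a.set(3);
        BitSet b = new BitSet(); b.set(3); b.set(4);
        System.out.println(andFilter(() -> a, () -> b));  // {3}
      }
    }
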
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/BitSetUpdaterFactory.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/BitSetUpdaterFactory.java
index 375ba61..0dcc945 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/BitSetUpdaterFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/BitSetUpdaterFactory.java
@@ -19,7 +19,7 @@
 
 import java.util.BitSet;
 
-import org.apache.carbondata.core.scan.filter.intf.FilterExecuterType;
+import org.apache.carbondata.core.scan.filter.intf.FilterExecutorType;
 
 /**
  * Class for updating the bitset
@@ -30,15 +30,15 @@
 
   public static final BitSetUpdaterFactory INSTANCE = new BitSetUpdaterFactory();
 
-  public FilterBitSetUpdater getBitSetUpdater(FilterExecuterType filterExecuterType) {
-    switch (filterExecuterType) {
+  public FilterBitSetUpdater getBitSetUpdater(FilterExecutorType filterExecutorType) {
+    switch (filterExecutorType) {
       case INCLUDE:
         return new IncludeFilterBitSetUpdater();
       case EXCLUDE:
         return new ExcludeFilterBitSetUpdater();
       default:
         throw new UnsupportedOperationException(
-            "Invalid filter executor type:" + filterExecuterType);
+            "Invalid filter executor type:" + filterExecutorType);
     }
   }
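
BitSetUpdaterFactory maps the (renamed) FilterExecutorType to a strategy object: an include updater sets the bit for a matching row, an exclude updater clears it. A stand-alone sketch of that strategy pattern, with hypothetical local types that mirror rather than reuse the CarbonData ones:

    import java.util.BitSet;

    public class BitSetUpdaterSketch {
      enum FilterExecutorType { INCLUDE, EXCLUDE }

      interface FilterBitSetUpdater {
        void updateBitset(BitSet bitSet, int rowId);
      }

      static FilterBitSetUpdater getBitSetUpdater(FilterExecutorType type) {
        switch (type) {
          case INCLUDE:
            return BitSet::set;     // a match means the row is selected
          case EXCLUDE:
            return BitSet::clear;   // a match means the row is dropped
          default:
            throw new UnsupportedOperationException("Invalid filter executor type: " + type);
        }
      }

      public static void main(String[] args) {
        BitSet rows = new BitSet(8);
        rows.flip(0, 8);                                       // exclude starts all-selected
        getBitSetUpdater(FilterExecutorType.EXCLUDE).updateBitset(rows, 3);
        System.out.println(rows);                              // {0..7} minus 3
      }
    }
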
 
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/DimColumnExecuterFilterInfo.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/DimColumnExecutorFilterInfo.java
similarity index 96%
rename from core/src/main/java/org/apache/carbondata/core/scan/filter/executer/DimColumnExecuterFilterInfo.java
rename to core/src/main/java/org/apache/carbondata/core/scan/filter/executer/DimColumnExecutorFilterInfo.java
index 6291de2..2d3cc0f 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/DimColumnExecuterFilterInfo.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/DimColumnExecutorFilterInfo.java
@@ -17,7 +17,7 @@
 
 package org.apache.carbondata.core.scan.filter.executer;
 
-public class DimColumnExecuterFilterInfo {
+public class DimColumnExecutorFilterInfo {
 
   byte[][] filterKeys;
 
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecutorImpl.java
similarity index 93%
rename from core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecuterImpl.java
rename to core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecutorImpl.java
index baa89de..845245c 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecutorImpl.java
@@ -28,7 +28,7 @@
 import org.apache.carbondata.core.metadata.datatype.DataType;
 import org.apache.carbondata.core.scan.filter.FilterExecutorUtil;
 import org.apache.carbondata.core.scan.filter.FilterUtil;
-import org.apache.carbondata.core.scan.filter.intf.FilterExecuterType;
+import org.apache.carbondata.core.scan.filter.intf.FilterExecutorType;
 import org.apache.carbondata.core.scan.filter.intf.RowIntf;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
@@ -40,12 +40,12 @@
 import org.apache.carbondata.core.util.comparator.Comparator;
 import org.apache.carbondata.core.util.comparator.SerializableComparator;
 
-public class ExcludeFilterExecuterImpl implements FilterExecuter {
+public class ExcludeFilterExecutorImpl implements FilterExecutor {
 
   private DimColumnResolvedFilterInfo dimColEvaluatorInfo;
-  private DimColumnExecuterFilterInfo dimColumnExecuterInfo;
+  private DimColumnExecutorFilterInfo dimColumnExecuterInfo;
   private MeasureColumnResolvedFilterInfo msrColumnEvaluatorInfo;
-  private MeasureColumnExecuterFilterInfo msrColumnExecutorInfo;
+  private MeasureColumnExecutorFilterInfo msrColumnExecutorInfo;
   protected SegmentProperties segmentProperties;
   private boolean isDimensionPresentInCurrentBlock = false;
   private boolean isMeasurePresentInCurrentBlock = false;
@@ -59,22 +59,22 @@
 
   private FilterBitSetUpdater filterBitSetUpdater;
 
-  public ExcludeFilterExecuterImpl(byte[][] filterValues, boolean isNaturalSorted) {
+  public ExcludeFilterExecutorImpl(byte[][] filterValues, boolean isNaturalSorted) {
     this.filterValues = filterValues;
     this.isNaturalSorted = isNaturalSorted;
     this.filterBitSetUpdater =
-        BitSetUpdaterFactory.INSTANCE.getBitSetUpdater(FilterExecuterType.EXCLUDE);
+        BitSetUpdaterFactory.INSTANCE.getBitSetUpdater(FilterExecutorType.EXCLUDE);
   }
 
-  public ExcludeFilterExecuterImpl(DimColumnResolvedFilterInfo dimColEvaluatorInfo,
+  public ExcludeFilterExecutorImpl(DimColumnResolvedFilterInfo dimColEvaluatorInfo,
       MeasureColumnResolvedFilterInfo msrColumnEvaluatorInfo, SegmentProperties segmentProperties,
       boolean isMeasure) {
     this.filterBitSetUpdater =
-        BitSetUpdaterFactory.INSTANCE.getBitSetUpdater(FilterExecuterType.EXCLUDE);
+        BitSetUpdaterFactory.INSTANCE.getBitSetUpdater(FilterExecutorType.EXCLUDE);
     this.segmentProperties = segmentProperties;
     if (!isMeasure) {
       this.dimColEvaluatorInfo = dimColEvaluatorInfo;
-      dimColumnExecuterInfo = new DimColumnExecuterFilterInfo();
+      dimColumnExecuterInfo = new DimColumnExecutorFilterInfo();
 
       FilterUtil.prepareKeysFromSurrogates(dimColEvaluatorInfo.getFilterValues(), segmentProperties,
           dimColEvaluatorInfo.getDimension(), dimColumnExecuterInfo, null, null);
@@ -84,7 +84,7 @@
               .getDimension().isSortColumn();
     } else {
       this.msrColumnEvaluatorInfo = msrColumnEvaluatorInfo;
-      msrColumnExecutorInfo = new MeasureColumnExecuterFilterInfo();
+      msrColumnExecutorInfo = new MeasureColumnExecutorFilterInfo();
       FilterUtil
           .prepareKeysFromSurrogates(msrColumnEvaluatorInfo.getFilterValues(), segmentProperties,
               null, null, msrColumnEvaluatorInfo.getMeasure(), msrColumnExecutorInfo);
@@ -190,12 +190,12 @@
     return true;
   }
 
-  private BitSet getFilteredIndexes(ColumnPage columnPage, int numerOfRows, DataType msrType) {
+  private BitSet getFilteredIndexes(ColumnPage columnPage, int numberOfRows, DataType msrType) {
     // Here the algorithm is
     // Get the measure values from the chunk. compare sequentially with the
     // the filter values. The one that matches sets it Bitset.
-    BitSet bitSet = new BitSet(numerOfRows);
-    bitSet.flip(0, numerOfRows);
+    BitSet bitSet = new BitSet(numberOfRows);
+    bitSet.flip(0, numberOfRows);
     FilterExecutorUtil.executeIncludeExcludeFilterForMeasure(columnPage, bitSet,
         msrColumnExecutorInfo, msrColumnEvaluatorInfo, filterBitSetUpdater);
     return bitSet;
@@ -230,7 +230,7 @@
    * @param pageNumber
    * @param numberOfRows
    * @param msrDataType
-   * @return filtred indexes bitset
+   * @return filtered indexes bitset
    */
   private BitSet getFilteredIndexesForMsrUsingPrvBitSet(ColumnPage measureColumnPage,
       BitSetGroup prvBitSetGroup, int pageNumber, int numberOfRows, DataType msrDataType) {
@@ -280,7 +280,7 @@
     if (filterValues.length > 0 && CarbonUtil
         .usePreviousFilterBitsetGroup(useBitsetPipeLine, prvBitSetGroup, pageNumber,
             filterValues.length)) {
-      return getFilteredIndexesUisngPrvBitset(dimensionColumnPage, prvBitSetGroup, pageNumber);
+      return getFilteredIndexesUsingPrvBitset(dimensionColumnPage, prvBitSetGroup, pageNumber);
     } else {
       return getFilteredIndexes(dimensionColumnPage, numberOfRows);
     }
@@ -289,9 +289,9 @@
   private BitSet getFilteredIndexes(DimensionColumnPage dimensionColumnPage,
       int numberOfRows) {
     if (dimensionColumnPage.isExplicitSorted()) {
-      return setFilterdIndexToBitSetWithColumnIndex(dimensionColumnPage, numberOfRows);
+      return setFilteredIndexToBitSetWithColumnIndex(dimensionColumnPage, numberOfRows);
     }
-    return setFilterdIndexToBitSet(dimensionColumnPage, numberOfRows);
+    return setFilteredIndexToBitSet(dimensionColumnPage, numberOfRows);
   }
 
   /**
@@ -301,7 +301,7 @@
    * @param pageNumber
    * @return filtered indexes bitset
    */
-  private BitSet getFilteredIndexesUisngPrvBitset(DimensionColumnPage dimensionColumnPage,
+  private BitSet getFilteredIndexesUsingPrvBitset(DimensionColumnPage dimensionColumnPage,
       BitSetGroup prvBitSetGroup, int pageNumber) {
     BitSet prvPageBitSet = prvBitSetGroup.getBitSet(pageNumber);
     if (prvPageBitSet == null || prvPageBitSet.isEmpty()) {
@@ -343,7 +343,7 @@
     return bitSet;
   }
 
-  private BitSet setFilterdIndexToBitSetWithColumnIndex(
+  private BitSet setFilteredIndexToBitSetWithColumnIndex(
       DimensionColumnPage dimensionColumnPage, int numerOfRows) {
     BitSet bitSet = new BitSet(numerOfRows);
     bitSet.flip(0, numerOfRows);
@@ -368,7 +368,7 @@
     return bitSet;
   }
 
-  private BitSet setFilterdIndexToBitSet(DimensionColumnPage dimensionColumnPage,
+  private BitSet setFilteredIndexToBitSet(DimensionColumnPage dimensionColumnPage,
       int numerOfRows) {
     BitSet bitSet = new BitSet(numerOfRows);
     bitSet.flip(0, numerOfRows);
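
The exclude path seen above (bitSet.flip(0, numberOfRows) followed by clearing matches) inverts the include logic: start from "every row survives" and knock out rows equal to any filter value. A small sketch under the simplifying assumption of one int column held in memory:

    import java.util.Arrays;
    import java.util.BitSet;

    public class ExcludeFilterSketch {
      // Rows whose value is NOT in filterKeys survive.
      static BitSet excludeFilter(int[] columnPage, int[] sortedFilterKeys) {
        int numberOfRows = columnPage.length;
        BitSet bitSet = new BitSet(numberOfRows);
        bitSet.flip(0, numberOfRows);                       // start with all rows selected
        for (int rowId = 0; rowId < numberOfRows; rowId++) {
          if (Arrays.binarySearch(sortedFilterKeys, columnPage[rowId]) >= 0) {
            bitSet.clear(rowId);                            // matching rows are excluded
          }
        }
        return bitSet;
      }

      public static void main(String[] args) {
        int[] page = {5, 7, 5, 9, 11};
        System.out.println(excludeFilter(page, new int[] {5, 9}));  // {1, 4}
      }
    }
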
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/FalseFilterExecutor.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/FalseFilterExecutor.java
index 4aa74ef..383deff 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/FalseFilterExecutor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/FalseFilterExecutor.java
@@ -23,7 +23,7 @@
 import org.apache.carbondata.core.scan.processor.RawBlockletColumnChunks;
 import org.apache.carbondata.core.util.BitSetGroup;
 
-public class FalseFilterExecutor implements FilterExecuter {
+public class FalseFilterExecutor implements FilterExecutor {
 
   @Override
   public BitSetGroup applyFilter(RawBlockletColumnChunks rawChunks, boolean useBitsetPipeline) {
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/FilterExecuter.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/FilterExecutor.java
similarity index 98%
rename from core/src/main/java/org/apache/carbondata/core/scan/filter/executer/FilterExecuter.java
rename to core/src/main/java/org/apache/carbondata/core/scan/filter/executer/FilterExecutor.java
index 6077e84..8db775a 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/FilterExecuter.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/FilterExecutor.java
@@ -25,7 +25,7 @@
 import org.apache.carbondata.core.scan.processor.RawBlockletColumnChunks;
 import org.apache.carbondata.core.util.BitSetGroup;
 
-public interface FilterExecuter {
+public interface FilterExecutor {
 
   /**
    * API will apply filter based on resolver instance
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ImplicitIncludeFilterExecutorImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ImplicitIncludeFilterExecutorImpl.java
index ebf88a6..a027d5d 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ImplicitIncludeFilterExecutorImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ImplicitIncludeFilterExecutorImpl.java
@@ -32,7 +32,7 @@
  * on the implicit column filter values
  */
 public class ImplicitIncludeFilterExecutorImpl
-    implements FilterExecuter, ImplicitColumnFilterExecutor {
+    implements FilterExecutor, ImplicitColumnFilterExecutor {
 
   private final DimColumnResolvedFilterInfo dimColumnEvaluatorInfo;
 
@@ -111,7 +111,7 @@
 
   /**
    * For implicit column filtering, complete data need to be selected. As it is a special case
-   * no data need to be discarded, implicit filtering is only for slecting block and blocklets
+   * no data needs to be discarded, implicit filtering is only for selecting blocks and blocklets
    *
    * @param numberOfRows
    * @return
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecutorImpl.java
similarity index 93%
rename from core/src/main/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImpl.java
rename to core/src/main/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecutorImpl.java
index 41a8f85..7ab5716 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecutorImpl.java
@@ -30,7 +30,7 @@
 import org.apache.carbondata.core.metadata.datatype.DataTypes;
 import org.apache.carbondata.core.scan.filter.FilterExecutorUtil;
 import org.apache.carbondata.core.scan.filter.FilterUtil;
-import org.apache.carbondata.core.scan.filter.intf.FilterExecuterType;
+import org.apache.carbondata.core.scan.filter.intf.FilterExecutorType;
 import org.apache.carbondata.core.scan.filter.intf.RowIntf;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
@@ -42,12 +42,12 @@
 import org.apache.carbondata.core.util.comparator.Comparator;
 import org.apache.carbondata.core.util.comparator.SerializableComparator;
 
-public class IncludeFilterExecuterImpl implements FilterExecuter {
+public class IncludeFilterExecutorImpl implements FilterExecutor {
 
   protected DimColumnResolvedFilterInfo dimColumnEvaluatorInfo;
-  DimColumnExecuterFilterInfo dimColumnExecuterInfo;
+  DimColumnExecutorFilterInfo dimColumnExecutorInfo;
   private MeasureColumnResolvedFilterInfo msrColumnEvaluatorInfo;
-  private MeasureColumnExecuterFilterInfo msrColumnExecutorInfo;
+  private MeasureColumnExecutorFilterInfo msrColumnExecutorInfo;
   protected SegmentProperties segmentProperties;
   private boolean isDimensionPresentInCurrentBlock = false;
   private boolean isMeasurePresentInCurrentBlock = false;
@@ -61,25 +61,25 @@
 
   private FilterBitSetUpdater filterBitSetUpdater;
 
-  public IncludeFilterExecuterImpl(byte[][] filterValues, boolean isNaturalSorted) {
+  public IncludeFilterExecutorImpl(byte[][] filterValues, boolean isNaturalSorted) {
     this.filterValues = filterValues;
     this.isNaturalSorted = isNaturalSorted;
     this.filterBitSetUpdater =
-        BitSetUpdaterFactory.INSTANCE.getBitSetUpdater(FilterExecuterType.INCLUDE);
+        BitSetUpdaterFactory.INSTANCE.getBitSetUpdater(FilterExecutorType.INCLUDE);
   }
 
-  public IncludeFilterExecuterImpl(DimColumnResolvedFilterInfo dimColumnEvaluatorInfo,
+  public IncludeFilterExecutorImpl(DimColumnResolvedFilterInfo dimColumnEvaluatorInfo,
       MeasureColumnResolvedFilterInfo msrColumnEvaluatorInfo, SegmentProperties segmentProperties,
       boolean isMeasure) {
     this.filterBitSetUpdater =
-        BitSetUpdaterFactory.INSTANCE.getBitSetUpdater(FilterExecuterType.INCLUDE);
+        BitSetUpdaterFactory.INSTANCE.getBitSetUpdater(FilterExecutorType.INCLUDE);
     this.segmentProperties = segmentProperties;
     if (!isMeasure) {
       this.dimColumnEvaluatorInfo = dimColumnEvaluatorInfo;
-      dimColumnExecuterInfo = new DimColumnExecuterFilterInfo();
+      dimColumnExecutorInfo = new DimColumnExecutorFilterInfo();
       FilterUtil
           .prepareKeysFromSurrogates(dimColumnEvaluatorInfo.getFilterValues(), segmentProperties,
-              dimColumnEvaluatorInfo.getDimension(), dimColumnExecuterInfo, null, null);
+              dimColumnEvaluatorInfo.getDimension(), dimColumnExecutorInfo, null, null);
       isDimensionPresentInCurrentBlock = true;
       isNaturalSorted =
           dimColumnEvaluatorInfo.getDimension().isUseInvertedIndex() && dimColumnEvaluatorInfo
@@ -87,7 +87,7 @@
 
     } else {
       this.msrColumnEvaluatorInfo = msrColumnEvaluatorInfo;
-      msrColumnExecutorInfo = new MeasureColumnExecuterFilterInfo();
+      msrColumnExecutorInfo = new MeasureColumnExecutorFilterInfo();
       comparator =
           Comparator.getComparatorByDataTypeForMeasure(
               FilterUtil.getMeasureDataType(msrColumnEvaluatorInfo));
@@ -113,7 +113,7 @@
       DimensionRawColumnChunk dimensionRawColumnChunk =
           rawBlockletColumnChunks.getDimensionRawColumnChunks()[chunkIndex];
       BitSetGroup bitSetGroup = new BitSetGroup(dimensionRawColumnChunk.getPagesCount());
-      filterValues = dimColumnExecuterInfo.getFilterKeys();
+      filterValues = dimColumnExecutorInfo.getFilterKeys();
       boolean isDecoded = false;
       for (int i = 0; i < dimensionRawColumnChunk.getPagesCount(); i++) {
         if (dimensionRawColumnChunk.getMaxValues() != null) {
@@ -122,7 +122,7 @@
             if (!isDecoded) {
               filterValues =  FilterUtil
                   .getEncodedFilterValues(dimensionRawColumnChunk.getLocalDictionary(),
-                      dimColumnExecuterInfo.getFilterKeys());
+                      dimColumnExecutorInfo.getFilterKeys());
               isDecoded = true;
             }
             BitSet bitSet = getFilteredIndexes(dimensionColumnPage,
@@ -182,12 +182,12 @@
         && dimColumnEvaluatorInfo.getDimension().getDataType() != DataTypes.DATE) {
       scanRequired = isScanRequired(dimensionRawColumnChunk.getMaxValues()[columnIndex],
           dimensionRawColumnChunk.getMinValues()[columnIndex],
-          dimColumnExecuterInfo.getFilterKeys(),
+          dimColumnExecutorInfo.getFilterKeys(),
           dimColumnEvaluatorInfo.getDimension().getDataType());
     } else {
       scanRequired = isScanRequired(dimensionRawColumnChunk.getMaxValues()[columnIndex],
           dimensionRawColumnChunk.getMinValues()[columnIndex],
-          dimColumnExecuterInfo.getFilterKeys(),
+          dimColumnExecutorInfo.getFilterKeys(),
           dimensionRawColumnChunk.getMinMaxFlagArray()[columnIndex]);
     }
     return scanRequired;
@@ -206,7 +206,7 @@
       }
       DimensionRawColumnChunk dimensionRawColumnChunk =
           rawBlockletColumnChunks.getDimensionRawColumnChunks()[chunkIndex];
-      filterValues = dimColumnExecuterInfo.getFilterKeys();
+      filterValues = dimColumnExecutorInfo.getFilterKeys();
       BitSet bitSet = new BitSet(dimensionRawColumnChunk.getPagesCount());
       for (int i = 0; i < dimensionRawColumnChunk.getPagesCount(); i++) {
         if (dimensionRawColumnChunk.getMaxValues() != null) {
@@ -248,7 +248,7 @@
   @Override
   public boolean applyFilter(RowIntf value, int dimOrdinalMax) {
     if (isDimensionPresentInCurrentBlock) {
-      byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
+      byte[][] filterValues = dimColumnExecutorInfo.getFilterKeys();
       byte[] col = (byte[])value.getVal(dimColumnEvaluatorInfo.getDimension().getOrdinal());
       for (int i = 0; i < filterValues.length; i++) {
         if (0 == ByteUtil.UnsafeComparer.INSTANCE.compareTo(col, 0, col.length,
@@ -314,7 +314,7 @@
    * @param pageNumber
    * @param numberOfRows
    * @param msrDataType
-   * @return filtred indexes bitset
+   * @return filtered indexes bitset
    */
   private BitSet getFilteredIndexesForMsrUsingPrvBitSet(ColumnPage measureColumnPage,
       BitSetGroup prvBitSetGroup, int pageNumber, int numberOfRows, DataType msrDataType) {
@@ -363,7 +363,7 @@
     if (filterValues.length > 0 && CarbonUtil
         .usePreviousFilterBitsetGroup(useBitsetPipeLine, prvBitSetGroup, pageNumber,
             filterValues.length)) {
-      return getFilteredIndexesUisngPrvBitset(dimensionColumnPage, prvBitSetGroup, pageNumber,
+      return getFilteredIndexesUsingPrvBitset(dimensionColumnPage, prvBitSetGroup, pageNumber,
           numberOfRows);
     } else {
       return getFilteredIndexes(dimensionColumnPage, numberOfRows);
@@ -373,9 +373,9 @@
   private BitSet getFilteredIndexes(DimensionColumnPage dimensionColumnPage,
       int numberOfRows) {
     if (dimensionColumnPage.isExplicitSorted()) {
-      return setFilterdIndexToBitSetWithColumnIndex(dimensionColumnPage, numberOfRows);
+      return setFilteredIndexToBitSetWithColumnIndex(dimensionColumnPage, numberOfRows);
     }
-    return setFilterdIndexToBitSet(dimensionColumnPage, numberOfRows);
+    return setFilteredIndexToBitSet(dimensionColumnPage, numberOfRows);
   }
 
   /**
@@ -387,7 +387,7 @@
    * @param numberOfRows
    * @return filtered bitset
    */
-  private BitSet getFilteredIndexesUisngPrvBitset(DimensionColumnPage dimensionColumnPage,
+  private BitSet getFilteredIndexesUsingPrvBitset(DimensionColumnPage dimensionColumnPage,
       BitSetGroup prvBitSetGroup, int pageNumber, int numberOfRows) {
     BitSet prvPageBitSet = prvBitSetGroup.getBitSet(pageNumber);
     if (prvPageBitSet == null || prvPageBitSet.isEmpty()) {
@@ -420,7 +420,7 @@
     return bitSet;
   }
 
-  private BitSet setFilterdIndexToBitSetWithColumnIndex(
+  private BitSet setFilteredIndexToBitSetWithColumnIndex(
       DimensionColumnPage dimensionColumnPage, int numerOfRows) {
     BitSet bitSet = new BitSet(numerOfRows);
     if (filterValues.length == 0) {
@@ -444,9 +444,9 @@
     return bitSet;
   }
 
-  private BitSet setFilterdIndexToBitSet(DimensionColumnPage dimensionColumnPage,
-      int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
+  private BitSet setFilteredIndexToBitSet(DimensionColumnPage dimensionColumnPage,
+      int numberOfRows) {
+    BitSet bitSet = new BitSet(numberOfRows);
     if (filterValues.length == 0) {
       return bitSet;
     }
@@ -455,11 +455,11 @@
     if (isNaturalSorted && dimensionColumnPage.isExplicitSorted()) {
       int startIndex = 0;
       for (int i = 0; i < filterValues.length; i++) {
-        if (startIndex >= numerOfRows) {
+        if (startIndex >= numberOfRows) {
           break;
         }
         int[] rangeIndex = CarbonUtil
-            .getRangeIndexUsingBinarySearch(dimensionColumnPage, startIndex, numerOfRows - 1,
+            .getRangeIndexUsingBinarySearch(dimensionColumnPage, startIndex, numberOfRows - 1,
                 filterValues[i]);
         for (int j = rangeIndex[0]; j <= rangeIndex[1]; j++) {
           bitSet.set(j);
@@ -470,7 +470,7 @@
       }
     } else {
       if (filterValues.length > 1) {
-        for (int i = 0; i < numerOfRows; i++) {
+        for (int i = 0; i < numberOfRows; i++) {
           int index = CarbonUtil.binarySearch(filterValues, 0, filterValues.length - 1,
               dimensionColumnPage, i);
           if (index >= 0) {
@@ -478,7 +478,7 @@
           }
         }
       } else {
-        for (int j = 0; j < numerOfRows; j++) {
+        for (int j = 0; j < numberOfRows; j++) {
           if (dimensionColumnPage.compareTo(j, filterValues[0]) == 0) {
             bitSet.set(j);
           }
@@ -496,7 +496,7 @@
     boolean isScanRequired = false;
 
     if (isDimensionPresentInCurrentBlock) {
-      filterValues = dimColumnExecuterInfo.getFilterKeys();
+      filterValues = dimColumnExecutorInfo.getFilterKeys();
       chunkIndex = dimColumnEvaluatorInfo.getColumnIndexInMinMaxByteArray();
       // for no dictionary measure column comparison can be done
       // on the original data as like measure column
@@ -533,7 +533,7 @@
     boolean isScanRequired = false;
     for (int k = 0; k < filterValues.length; k++) {
       // filter value should be in range of max and min value i.e
-      // max>filtervalue>min
+      // max>filterValue>min
       // so filter-max should be negative
       int maxCompare =
           ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], blkMaxVal);
@@ -562,7 +562,7 @@
         return true;
       }
       // filter value should be in range of max and min value i.e
-      // max>filtervalue>min
+      // max>filterValue>min
       // so filter-max should be negative
       Object data =
           DataTypeUtil.getDataBasedOnDataTypeForNoDictionaryColumn(filterValues[k], dataType);
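
The recurring comment "max>filterValue>min, so filter-max should be negative" is the blocklet-pruning test: a scan is only required when at least one filter value falls inside the page's [min, max] interval. A sketch of that check for a comparable type (ints here; the real code compares encoded byte arrays):

    public class MinMaxPruningSketch {
      // Scan is required when some filter value v satisfies min <= v <= max,
      // i.e. compare(v, max) <= 0 (filter minus max is negative or zero)
      // and  compare(v, min) >= 0.
      static boolean isScanRequired(int blkMinVal, int blkMaxVal, int[] filterValues) {
        for (int filterValue : filterValues) {
          int maxCompare = Integer.compare(filterValue, blkMaxVal);
          int minCompare = Integer.compare(filterValue, blkMinVal);
          if (maxCompare <= 0 && minCompare >= 0) {
            return true;    // this blocklet may contain the value: must scan
          }
        }
        return false;       // every filter value lies outside [min, max]: prune
      }

      public static void main(String[] args) {
        System.out.println(isScanRequired(10, 20, new int[] {3, 25}));  // false: prune
        System.out.println(isScanRequired(10, 20, new int[] {3, 15}));  // true: scan
      }
    }
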
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/MeasureColumnExecuterFilterInfo.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/MeasureColumnExecutorFilterInfo.java
similarity index 97%
rename from core/src/main/java/org/apache/carbondata/core/scan/filter/executer/MeasureColumnExecuterFilterInfo.java
rename to core/src/main/java/org/apache/carbondata/core/scan/filter/executer/MeasureColumnExecutorFilterInfo.java
index 1a325ea..61b9836 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/MeasureColumnExecuterFilterInfo.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/MeasureColumnExecutorFilterInfo.java
@@ -35,10 +35,10 @@
  * Below class will be used to keep all the filter values based on data type
  * for measure column.
  * In this class there are multiple type of set is used to avoid conversion of
- * primitive type to primitive object to avoid gc which cause perofrmace degrade when
+ * primitive type to primitive object to avoid GC which causes performance degradation when
  * number of records are high
  */
-public class MeasureColumnExecuterFilterInfo {
+public class MeasureColumnExecutorFilterInfo {
 
   Object[] filterKeys;
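
The comment fixed above explains why MeasureColumnExecutorFilterInfo keeps one set per primitive data type: probing a HashSet<Long> boxes every row value, while primitive storage does not. A sketch of one boxing-free alternative (a sorted long[] with binary search; the class itself uses type-specific sets, so this is an illustration of the motivation, not the actual implementation):

    import java.util.Arrays;

    public class PrimitiveFilterKeysSketch {
      private final long[] sortedFilterKeys;     // primitive storage: no boxing, no GC churn

      PrimitiveFilterKeysSketch(long[] filterKeys) {
        this.sortedFilterKeys = filterKeys.clone();
        Arrays.sort(this.sortedFilterKeys);      // sort once so probes are O(log n)
      }

      // Called once per row: no Long allocation, unlike HashSet<Long>.contains().
      boolean contains(long rowValue) {
        return Arrays.binarySearch(sortedFilterKeys, rowValue) >= 0;
      }

      public static void main(String[] args) {
        PrimitiveFilterKeysSketch keys = new PrimitiveFilterKeysSketch(new long[] {42, 7});
        System.out.println(keys.contains(7));    // true
        System.out.println(keys.contains(8));    // false
      }
    }
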
 
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/OrFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/OrFilterExecutorImpl.java
similarity index 70%
rename from core/src/main/java/org/apache/carbondata/core/scan/filter/executer/OrFilterExecuterImpl.java
rename to core/src/main/java/org/apache/carbondata/core/scan/filter/executer/OrFilterExecutorImpl.java
index 8d3fd8f..01e3fdd 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/OrFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/OrFilterExecutorImpl.java
@@ -25,21 +25,21 @@
 import org.apache.carbondata.core.scan.processor.RawBlockletColumnChunks;
 import org.apache.carbondata.core.util.BitSetGroup;
 
-public class OrFilterExecuterImpl implements FilterExecuter {
+public class OrFilterExecutorImpl implements FilterExecutor {
 
-  private FilterExecuter leftExecuter;
-  private FilterExecuter rightExecuter;
+  private FilterExecutor leftExecutor;
+  private FilterExecutor rightExecutor;
 
-  public OrFilterExecuterImpl(FilterExecuter leftExecuter, FilterExecuter rightExecuter) {
-    this.leftExecuter = leftExecuter;
-    this.rightExecuter = rightExecuter;
+  public OrFilterExecutorImpl(FilterExecutor leftExecutor, FilterExecutor rightExecutor) {
+    this.leftExecutor = leftExecutor;
+    this.rightExecutor = rightExecutor;
   }
 
   @Override
   public BitSetGroup applyFilter(RawBlockletColumnChunks rawBlockletColumnChunks,
       boolean useBitsetPipeLine) throws FilterUnsupportedException, IOException {
-    BitSetGroup leftFilters = leftExecuter.applyFilter(rawBlockletColumnChunks, false);
-    BitSetGroup rightFilters = rightExecuter.applyFilter(rawBlockletColumnChunks, false);
+    BitSetGroup leftFilters = leftExecutor.applyFilter(rawBlockletColumnChunks, false);
+    BitSetGroup rightFilters = rightExecutor.applyFilter(rawBlockletColumnChunks, false);
     leftFilters.or(rightFilters);
     rawBlockletColumnChunks.setBitSetGroup(leftFilters);
     return leftFilters;
@@ -48,8 +48,8 @@
   @Override
   public BitSet prunePages(RawBlockletColumnChunks rawBlockletColumnChunks)
       throws FilterUnsupportedException, IOException {
-    BitSet leftFilters = leftExecuter.prunePages(rawBlockletColumnChunks);
-    BitSet rightFilters = rightExecuter.prunePages(rawBlockletColumnChunks);
+    BitSet leftFilters = leftExecutor.prunePages(rawBlockletColumnChunks);
+    BitSet rightFilters = rightExecutor.prunePages(rawBlockletColumnChunks);
     leftFilters.or(rightFilters);
     return leftFilters;
   }
@@ -57,22 +57,22 @@
   @Override
   public boolean applyFilter(RowIntf value, int dimOrdinalMax)
       throws FilterUnsupportedException, IOException {
-    return leftExecuter.applyFilter(value, dimOrdinalMax) ||
-        rightExecuter.applyFilter(value, dimOrdinalMax);
+    return leftExecutor.applyFilter(value, dimOrdinalMax) ||
+        rightExecutor.applyFilter(value, dimOrdinalMax);
   }
 
   @Override
   public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue,
       boolean[] isMinMaxSet) {
-    BitSet leftFilters = leftExecuter.isScanRequired(blockMaxValue, blockMinValue, isMinMaxSet);
-    BitSet rightFilters = rightExecuter.isScanRequired(blockMaxValue, blockMinValue, isMinMaxSet);
+    BitSet leftFilters = leftExecutor.isScanRequired(blockMaxValue, blockMinValue, isMinMaxSet);
+    BitSet rightFilters = rightExecutor.isScanRequired(blockMaxValue, blockMinValue, isMinMaxSet);
     leftFilters.or(rightFilters);
     return leftFilters;
   }
 
   @Override
   public void readColumnChunks(RawBlockletColumnChunks rawBlockletColumnChunks) throws IOException {
-    leftExecuter.readColumnChunks(rawBlockletColumnChunks);
-    rightExecuter.readColumnChunks(rawBlockletColumnChunks);
+    leftExecutor.readColumnChunks(rawBlockletColumnChunks);
+    rightExecutor.readColumnChunks(rawBlockletColumnChunks);
   }
 }
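
Unlike the AND executor, OrFilterExecutorImpl cannot short-circuit: a row rejected by the left subtree may still be accepted by the right one, so both sides are always evaluated (note it also passes false for useBitsetPipeLine) and the results are unioned. The same shape with plain java.util.BitSet:

    import java.util.BitSet;
    import java.util.function.Supplier;

    public class OrFilterSketch {
      // Both subtrees must run; a row survives if either side selects it.
      static BitSet orFilter(Supplier<BitSet> left, Supplier<BitSet> right) {
        BitSet leftFilters = left.get();
        BitSet rightFilters = right.get();
        leftFilters.or(rightFilters);   // union of surviving rows
        return leftFilters;
      }

      public static void main(String[] args) {
        BitSet a = new BitSet(); a.set(1);
        BitSet b = new BitSet(); b.set(4);
        System.out.println(orFilter(() -> a, () -> b));  // {1, 4}
      }
    }
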
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RangeValueFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RangeValueFilterExecutorImpl.java
similarity index 93%
rename from core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RangeValueFilterExecuterImpl.java
rename to core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RangeValueFilterExecutorImpl.java
index 664e046..e4daee3 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RangeValueFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RangeValueFilterExecutorImpl.java
@@ -44,7 +44,7 @@
  * for this Range. Also search the data block and set the required bitsets which falls within
  * the Range of the RANGE Expression.
  */
-public class RangeValueFilterExecuterImpl implements FilterExecuter {
+public class RangeValueFilterExecutorImpl implements FilterExecutor {
 
   private DimColumnResolvedFilterInfo dimColEvaluatorInfo;
   private Expression exp;
@@ -66,7 +66,7 @@
   private boolean isRangeFullyCoverBlock;
   private boolean isNaturalSorted;
 
-  public RangeValueFilterExecuterImpl(DimColumnResolvedFilterInfo dimColEvaluatorInfo,
+  public RangeValueFilterExecutorImpl(DimColumnResolvedFilterInfo dimColEvaluatorInfo,
       Expression exp, byte[][] filterRangeValues, SegmentProperties segmentProperties) {
 
     this.dimColEvaluatorInfo = dimColEvaluatorInfo;
@@ -272,8 +272,8 @@
     endBlockMaxisDefaultEnd = false;
 
     /*
-    For Undertsanding the below logic kept the value evaluation code intact.
-    int filterMinlessThanBlockMin =
+    For understanding the below logic, the value evaluation code is kept intact.
+    int filterMinLessThanBlockMin =
         ByteUtil.UnsafeComparer.INSTANCE.compareTo(blockMinValue, filterValues[0]);
     int filterMaxLessThanBlockMin =
         ByteUtil.UnsafeComparer.INSTANCE.compareTo(blockMinValue, filterValues[1]);
@@ -392,7 +392,7 @@
     DimensionRawColumnChunk rawColumnChunk =
         blockChunkHolder.getDimensionRawColumnChunks()[chunkIndex];
     BitSetGroup bitSetGroup = new BitSetGroup(rawColumnChunk.getPagesCount());
-    FilterExecuter filterExecuter = null;
+    FilterExecutor filterExecutor = null;
     boolean isExclude = false;
     for (int i = 0; i < rawColumnChunk.getPagesCount(); i++) {
       if (rawColumnChunk.getMaxValues() != null) {
@@ -407,20 +407,20 @@
             BitSet bitSet;
             DimensionColumnPage dimensionColumnPage = rawColumnChunk.decodeColumnPage(i);
             if (null != rawColumnChunk.getLocalDictionary()) {
-              if (null == filterExecuter) {
-                filterExecuter = FilterUtil
+              if (null == filterExecutor) {
+                filterExecutor = FilterUtil
                     .getFilterExecutorForRangeFilters(rawColumnChunk, exp, isNaturalSorted);
-                if (filterExecuter instanceof ExcludeFilterExecuterImpl) {
+                if (filterExecutor instanceof ExcludeFilterExecutorImpl) {
                   isExclude = true;
                 }
               }
               if (!isExclude) {
-                bitSet = ((IncludeFilterExecuterImpl) filterExecuter)
+                bitSet = ((IncludeFilterExecutorImpl) filterExecutor)
                     .getFilteredIndexes(dimensionColumnPage,
                         rawColumnChunk.getRowCount()[i], useBitsetPipeLine,
                         blockChunkHolder.getBitSetGroup(), i);
               } else {
-                bitSet = ((ExcludeFilterExecuterImpl) filterExecuter)
+                bitSet = ((ExcludeFilterExecutorImpl) filterExecutor)
                     .getFilteredIndexes(dimensionColumnPage,
                         rawColumnChunk.getRowCount()[i], useBitsetPipeLine,
                         blockChunkHolder.getBitSetGroup(), i);
@@ -442,26 +442,26 @@
   }
 
   private BitSet getFilteredIndexes(DimensionColumnPage dimensionColumnPage,
-      int numerOfRows) {
+      int numberOfRows) {
     if (dimensionColumnPage.isExplicitSorted()) {
-      return setFilterdIndexToBitSetWithColumnIndex(dimensionColumnPage, numerOfRows);
+      return setFilteredIndexToBitSetWithColumnIndex(dimensionColumnPage, numberOfRows);
     }
-    return setFilterdIndexToBitSet(dimensionColumnPage, numerOfRows);
+    return setFilteredIndexToBitSet(dimensionColumnPage, numberOfRows);
   }
 
   /**
    * Method will scan the block and finds the range start index from which all members
    * will be considered for applying range filters. this method will be called if the
    * column is not supported by default so column index mapping  will be present for
-   * accesing the members from the block.
+   * accessing the members from the block.
    *
    * @param dimensionColumnPage
-   * @param numerOfRows
+   * @param numberOfRows
    * @return BitSet.
    */
-  private BitSet setFilterdIndexToBitSetWithColumnIndex(
-      DimensionColumnPage dimensionColumnPage, int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
+  private BitSet setFilteredIndexToBitSetWithColumnIndex(
+      DimensionColumnPage dimensionColumnPage, int numberOfRows) {
+    BitSet bitSet = new BitSet(numberOfRows);
     int start = 0;
     int startIndex = 0;
     int startMin = 0;
@@ -473,17 +473,17 @@
     // Get the Min Value
     if (!startBlockMinIsDefaultStart) {
       start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex, numerOfRows - 1,
+          .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex, numberOfRows - 1,
               filterValues[0], greaterThanExp);
       if (greaterThanExp && start >= 0) {
         start = CarbonUtil
             .nextGreaterValueToTarget(start, dimensionColumnPage, filterValues[0],
-                numerOfRows);
+                numberOfRows);
       }
 
       if (start < 0) {
         start = -(start + 1);
-        if (start == numerOfRows) {
+        if (start == numberOfRows) {
           start = start - 1;
         }
         // Method will compare the tentative index value after binary search, this tentative
@@ -503,7 +503,7 @@
     // Get the Max value
     if (!endBlockMaxisDefaultEnd) {
       start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex, numerOfRows - 1,
+          .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex, numberOfRows - 1,
               filterValues[1], lessThanEqualExp);
 
       if (lessThanExp && start >= 0) {
@@ -513,7 +513,7 @@
 
       if (start < 0) {
         start = -(start + 1);
-        if (start == numerOfRows) {
+        if (start == numberOfRows) {
           start = start - 1;
         }
         // In case the start is less than 0, then positive value of start is pointing to the next
@@ -526,7 +526,7 @@
       }
       endMax = start;
     } else {
-      endMax = numerOfRows - 1;
+      endMax = numberOfRows - 1;
     }
 
     for (int j = startMin; j <= endMax; j++) {
@@ -535,7 +535,7 @@
 
     // Binary Search cannot be done on '@NU#LL$!", so need to check and compare for null on
     // matching row.
-    if (dimensionColumnPage.isNoDicitionaryColumn() && !dimensionColumnPage.isAdaptiveEncoded()) {
+    if (dimensionColumnPage.isNoDictionaryColumn() && !dimensionColumnPage.isAdaptiveEncoded()) {
       updateForNoDictionaryColumn(startMin, endMax, dimensionColumnPage, bitSet);
     }
     return bitSet;
@@ -555,15 +555,15 @@
    * Method will scan the block and finds the range start index from which all
    * members will be considered for applying range filters. this method will
    * be called if the column is sorted default so column index
-   * mapping will be present for accesaing the members from the block.
+   * mapping will be present for accessing the members from the block.
    *
    * @param dimensionColumnPage
-   * @param numerOfRows
+   * @param numberOfRows
    * @return BitSet.
    */
-  private BitSet setFilterdIndexToBitSet(DimensionColumnPage dimensionColumnPage,
-      int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
+  private BitSet setFilteredIndexToBitSet(DimensionColumnPage dimensionColumnPage,
+      int numberOfRows) {
+    BitSet bitSet = new BitSet(numberOfRows);
     // if (dimensionColumnPage instanceof FixedLengthDimensionColumnPage) {
     byte[][] filterValues = this.filterRangesValues;
     if (dimensionColumnPage.isExplicitSorted()) {
@@ -576,18 +576,18 @@
       if (!startBlockMinIsDefaultStart) {
 
         start = CarbonUtil
-            .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex, numerOfRows - 1,
+            .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex, numberOfRows - 1,
                 filterValues[0], greaterThanExp);
 
         if (greaterThanExp && start >= 0) {
           start = CarbonUtil
               .nextGreaterValueToTarget(start, dimensionColumnPage, filterValues[0],
-                  numerOfRows);
+                  numberOfRows);
         }
 
         if (start < 0) {
           start = -(start + 1);
-          if (start == numerOfRows) {
+          if (start == numberOfRows) {
             start = start - 1;
           }
           // Method will compare the tentative index value after binary search, this tentative
@@ -605,7 +605,7 @@
 
       if (!endBlockMaxisDefaultEnd) {
         start = CarbonUtil
-            .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex, numerOfRows - 1,
+            .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex, numberOfRows - 1,
                 filterValues[1], lessThanEqualExp);
 
         if (lessThanExp && start >= 0) {
@@ -615,7 +615,7 @@
 
         if (start < 0) {
           start = -(start + 1);
-          if (start == numerOfRows) {
+          if (start == numberOfRows) {
             start = start - 1;
           }
           // In case the start is less than 0, then positive value of start is pointing to the next
@@ -627,7 +627,7 @@
         }
         endMax = start;
       } else {
-        endMax = numerOfRows - 1;
+        endMax = numberOfRows - 1;
       }
 
       for (int j = startMin; j <= endMax; j++) {
@@ -636,7 +636,7 @@
 
       // Binary Search cannot be done on '@NU#LL$!", so need to check and compare for null on
       // matching row.
-      if (dimensionColumnPage.isNoDicitionaryColumn()) {
+      if (dimensionColumnPage.isNoDictionaryColumn()) {
         updateForNoDictionaryColumn(startMin, endMax, dimensionColumnPage, bitSet);
       }
     } else {
@@ -653,10 +653,10 @@
       // evaluate result for lower range value first and then perform and operation in the
       // upper range value in order to compute the final result
       bitSet = evaluateGreaterThanFilterForUnsortedColumn(dimensionColumnPage, filterValues[0],
-          numerOfRows);
+          numberOfRows);
       BitSet upperRangeBitSet =
           evaluateLessThanFilterForUnsortedColumn(dimensionColumnPage, filterValues[1],
-              numerOfRows);
+              numberOfRows);
       bitSet.and(upperRangeBitSet);
       FilterUtil.removeNullValues(dimensionColumnPage, bitSet, defaultValue);
     }
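
The start = -(start + 1) handling that recurs through these hunks decodes a binary-search insertion point: when the filter value is absent, the search returns -(insertionPoint) - 1, the same convention as java.util.Arrays.binarySearch. A minimal self-contained sketch of the range-to-bitset pattern follows (illustrative names only, not the CarbonUtil API; duplicate boundary values are not resolved to their first occurrence here):

    import java.util.Arrays;
    import java.util.BitSet;

    public class RangeBitSetSketch {

      // Marks every row of a sorted block that falls inside [lower, upper],
      // decoding negative binary-search results the same way the executors do.
      static BitSet markRange(int[] sortedRows, int lower, int upper) {
        BitSet bitSet = new BitSet(sortedRows.length);
        int start = Arrays.binarySearch(sortedRows, lower);
        if (start < 0) {
          start = -(start + 1);        // value absent: insertion point is the range start
        }
        int end = Arrays.binarySearch(sortedRows, upper);
        if (end < 0) {
          end = -(end + 1) - 1;        // value absent: last element strictly below upper
        }
        for (int j = start; j <= end && j < sortedRows.length; j++) {
          bitSet.set(j);
        }
        return bitSet;
      }

      public static void main(String[] args) {
        int[] rows = {2, 4, 4, 7, 9};
        // 3 is absent: binarySearch returns -2, so -(-2 + 1) = 1 is the range start
        System.out.println(markRange(rows, 3, 8));   // prints {1, 2, 3}
      }
    }
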
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RestructureEvaluatorImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RestructureEvaluatorImpl.java
index 452e07f..fd685f8 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RestructureEvaluatorImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RestructureEvaluatorImpl.java
@@ -40,7 +40,7 @@
 /**
  * Abstract class for restructure
  */
-public abstract class RestructureEvaluatorImpl implements FilterExecuter {
+public abstract class RestructureEvaluatorImpl implements FilterExecutor {
 
   /**
    * This method will check whether a default value for the non-existing column is present
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecutorImpl.java
similarity index 96%
rename from core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecuterImpl.java
rename to core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecutorImpl.java
index d0429e2..8a8a841 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecutorImpl.java
@@ -58,10 +58,10 @@
 
 import org.apache.log4j.Logger;
 
-public class RowLevelFilterExecuterImpl implements FilterExecuter {
+public class RowLevelFilterExecutorImpl implements FilterExecutor {
 
   private static final Logger LOGGER =
-      LogServiceFactory.getLogService(RowLevelFilterExecuterImpl.class.getName());
+      LogServiceFactory.getLogService(RowLevelFilterExecutorImpl.class.getName());
   List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList;
   List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList;
   protected Expression exp;
@@ -100,7 +100,7 @@
    */
   boolean isNaturalSorted;
 
-  public RowLevelFilterExecuterImpl(List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
+  public RowLevelFilterExecutorImpl(List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
       List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList, Expression exp,
       AbsoluteTableIdentifier tableIdentifier, SegmentProperties segmentProperties,
       Map<Integer, GenericQueryType> complexDimensionInfoMap) {
@@ -226,7 +226,7 @@
       BitSet set = new BitSet(numberOfRows[i]);
       RowIntf row = new RowImpl();
       BitSet prvBitset = null;
-      // if bitset pipe line is enabled then use rowid from previous bitset
+      // if bitset pipeline is enabled then use row id from previous bitset
       // otherwise use older flow
       if (!useBitsetPipeLine ||
           null == rawBlockletColumnChunks.getBitSetGroup() ||
@@ -234,9 +234,9 @@
           rawBlockletColumnChunks.getBitSetGroup().getBitSet(i).isEmpty()) {
         for (int index = 0; index < numberOfRows[i]; index++) {
           createRow(rawBlockletColumnChunks, row, i, index);
-          Boolean rslt = false;
+          Boolean result = false;
           try {
-            rslt = exp.evaluate(row).getBoolean();
+            result = exp.evaluate(row).getBoolean();
           }
           // Any invalid member while evaluation shall be ignored, system will log the
           // error only once since all rows the evaluation happens so inorder to avoid
@@ -244,7 +244,7 @@
           catch (FilterIllegalMemberException e) {
             FilterUtil.logError(e, false);
           }
-          if (null != rslt && rslt) {
+          if (null != result && result) {
             set.set(index);
           }
         }
@@ -253,13 +253,13 @@
         for (int index = prvBitset.nextSetBit(0);
              index >= 0; index = prvBitset.nextSetBit(index + 1)) {
           createRow(rawBlockletColumnChunks, row, i, index);
-          Boolean rslt = false;
+          Boolean result = false;
           try {
-            rslt = exp.evaluate(row).getBoolean();
+            result = exp.evaluate(row).getBoolean();
           } catch (FilterIllegalMemberException e) {
             FilterUtil.logError(e, false);
           }
-          if (null != rslt && rslt) {
+          if (null != result && result) {
             set.set(index);
           }
         }
@@ -310,7 +310,7 @@
         continue;
       }
       if (!dimColumnEvaluatorInfo.getDimension().getDataType().isComplexType()) {
-        if (!dimColumnEvaluatorInfo.isDimensionExistsInCurrentSilce()) {
+        if (!dimColumnEvaluatorInfo.isDimensionExistsInCurrentSlice()) {
           record[index] = dimColumnEvaluatorInfo.getDimension().getDefaultValue();
         }
         byte[] memberBytes = (byte[]) value.getVal(index);
@@ -371,7 +371,7 @@
         continue;
       }
       if (!dimColumnEvaluatorInfo.getDimension().getDataType().isComplexType()) {
-        if (!dimColumnEvaluatorInfo.isDimensionExistsInCurrentSilce()) {
+        if (!dimColumnEvaluatorInfo.isDimensionExistsInCurrentSlice()) {
           record[dimColumnEvaluatorInfo.getRowIndex()] =
               dimColumnEvaluatorInfo.getDimension().getDefaultValue();
         }
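
The rslt-to-result rename above sits in the row-by-row evaluation loop of RowLevelFilterExecutorImpl; when the bitset pipeline is enabled, only rows that survived the previous filter are re-evaluated via nextSetBit. A self-contained sketch of that iteration pattern, with an IntPredicate standing in for exp.evaluate(row):

    import java.util.BitSet;
    import java.util.function.IntPredicate;

    public class BitSetPipelineSketch {

      // Evaluates rowMatches over all rows, or, when a previous filter's bitset
      // is available, only over the rows still set in it (the pipeline path).
      static BitSet evaluate(int numberOfRows, BitSet previous, IntPredicate rowMatches) {
        BitSet set = new BitSet(numberOfRows);
        if (previous == null || previous.isEmpty()) {
          for (int index = 0; index < numberOfRows; index++) {
            if (rowMatches.test(index)) {
              set.set(index);
            }
          }
        } else {
          for (int index = previous.nextSetBit(0); index >= 0;
               index = previous.nextSetBit(index + 1)) {
            if (rowMatches.test(index)) {
              set.set(index);
            }
          }
        }
        return set;
      }

      public static void main(String[] args) {
        BitSet previous = new BitSet();
        previous.set(1);
        previous.set(3);
        // only rows 1 and 3 are re-evaluated; both are odd, so both stay set
        System.out.println(evaluate(5, previous, index -> index % 2 == 1));  // {1, 3}
      }
    }
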
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtrThanEquaToFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGreaterThanEqualFilterExecutorImpl.java
similarity index 92%
rename from core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtrThanEquaToFilterExecuterImpl.java
rename to core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGreaterThanEqualFilterExecutorImpl.java
index 7d245d9..aebcd25 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtrThanEquaToFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGreaterThanEqualFilterExecutorImpl.java
@@ -46,7 +46,7 @@
 import org.apache.carbondata.core.util.comparator.Comparator;
 import org.apache.carbondata.core.util.comparator.SerializableComparator;
 
-public class RowLevelRangeGrtrThanEquaToFilterExecuterImpl extends RowLevelFilterExecuterImpl {
+public class RowLevelRangeGreaterThanEqualFilterExecutorImpl extends RowLevelFilterExecutorImpl {
 
   private byte[][] filterRangeValues;
   private Object[] msrFilterRangeValues;
@@ -56,7 +56,7 @@
    */
   private boolean isDefaultValuePresentInFilter;
 
-  RowLevelRangeGrtrThanEquaToFilterExecuterImpl(
+  RowLevelRangeGreaterThanEqualFilterExecutorImpl(
       List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
       List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList, Expression exp,
       AbsoluteTableIdentifier tableIdentifier, byte[][] filterRangeValues,
@@ -158,7 +158,7 @@
     boolean isScanRequired = false;
     for (int k = 0; k < filterValues.length; k++) {
       // filter value should be in range of max and min value i.e
-      // max>filtervalue>min
+      // max>filterValue>min
       // so filter-max should be negative
       int maxCompare = ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], blockMaxValue);
       // if any filter value is in range than this block needs to be
@@ -181,7 +181,7 @@
         return true;
       }
       // filter value should be in range of max and min value i.e
-      // max>filtervalue>min
+      // max>filterValue>min
       // so filter-max should be negative
       Object data =
           DataTypeUtil.getDataBasedOnDataTypeForNoDictionaryColumn(filterValues[k], dataType);
@@ -234,7 +234,7 @@
       DimensionRawColumnChunk rawColumnChunk =
           rawBlockletColumnChunks.getDimensionRawColumnChunks()[chunkIndex];
       BitSetGroup bitSetGroup = new BitSetGroup(rawColumnChunk.getPagesCount());
-      FilterExecuter filterExecuter = null;
+      FilterExecutor filterExecutor = null;
       boolean isExclude = false;
       for (int i = 0; i < rawColumnChunk.getPagesCount(); i++) {
         if (rawColumnChunk.getMaxValues() != null) {
@@ -249,20 +249,20 @@
               DimensionColumnPage dimensionColumnPage = rawColumnChunk.decodeColumnPage(i);
               BitSet bitSet = null;
               if (null != rawColumnChunk.getLocalDictionary()) {
-                if (null == filterExecuter) {
-                  filterExecuter = FilterUtil
+                if (null == filterExecutor) {
+                  filterExecutor = FilterUtil
                       .getFilterExecutorForRangeFilters(rawColumnChunk, exp, isNaturalSorted);
-                  if (filterExecuter instanceof ExcludeFilterExecuterImpl) {
+                  if (filterExecutor instanceof ExcludeFilterExecutorImpl) {
                     isExclude = true;
                   }
                 }
                 if (!isExclude) {
-                  bitSet = ((IncludeFilterExecuterImpl) filterExecuter)
+                  bitSet = ((IncludeFilterExecutorImpl) filterExecutor)
                       .getFilteredIndexes(dimensionColumnPage,
                           rawColumnChunk.getRowCount()[i], useBitsetPipeLine,
                           rawBlockletColumnChunks.getBitSetGroup(), i);
                 } else {
-                  bitSet = ((ExcludeFilterExecuterImpl) filterExecuter)
+                  bitSet = ((ExcludeFilterExecutorImpl) filterExecutor)
                       .getFilteredIndexes(dimensionColumnPage,
                           rawColumnChunk.getRowCount()[i], useBitsetPipeLine,
                           rawBlockletColumnChunks.getBitSetGroup(), i);
@@ -416,8 +416,8 @@
   }
 
   private BitSet getFilteredIndexesForMeasures(ColumnPage columnPage,
-      int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
+      int numberOfRows) {
+    BitSet bitSet = new BitSet(numberOfRows);
     Object[] filterValues = this.msrFilterRangeValues;
     DataType msrType = msrColEvalutorInfoList.get(0).getType();
     SerializableComparator comparator = Comparator.getComparatorByDataTypeForMeasure(msrType);
@@ -429,7 +429,7 @@
         }
         continue;
       }
-      for (int startIndex = 0; startIndex < numerOfRows; startIndex++) {
+      for (int startIndex = 0; startIndex < numberOfRows; startIndex++) {
         if (!nullBitSet.get(startIndex)) {
           Object msrValue = DataTypeUtil
               .getMeasureObjectBasedOnDataType(columnPage, startIndex,
@@ -446,12 +446,12 @@
   }
 
   private BitSet getFilteredIndexes(DimensionColumnPage dimensionColumnPage,
-      int numerOfRows) {
+      int numberOfRows) {
     BitSet bitSet = null;
     if (dimensionColumnPage.isExplicitSorted()) {
-      bitSet = setFilterdIndexToBitSetWithColumnIndex(dimensionColumnPage, numerOfRows);
+      bitSet = setFilteredIndexToBitSetWithColumnIndex(dimensionColumnPage, numberOfRows);
     } else {
-      bitSet = setFilterdIndexToBitSet(dimensionColumnPage, numerOfRows);
+      bitSet = setFilteredIndexToBitSet(dimensionColumnPage, numberOfRows);
     }
     byte[] defaultValue = null;
     if (dimColEvaluatorInfoList.get(0).getDimension().getDataType() == DataTypes.STRING) {
@@ -462,7 +462,7 @@
     } else if (!dimensionColumnPage.isAdaptiveEncoded()) {
       defaultValue = CarbonCommonConstants.EMPTY_BYTE_ARRAY;
     }
-    if (dimensionColumnPage.isNoDicitionaryColumn() ||
+    if (dimensionColumnPage.isNoDictionaryColumn() ||
         dimColEvaluatorInfoList.get(0).getDimension().getDataType() == DataTypes.DATE) {
       FilterUtil.removeNullValues(dimensionColumnPage, bitSet, defaultValue);
     }
@@ -473,26 +473,26 @@
    * Method will scan the block and finds the range start index from which all members
    * will be considered for applying range filters. this method will be called if the
    * column is not supported by default so column index mapping  will be present for
-   * accesing the members from the block.
+   * accessing the members from the block.
    *
    * @param dimensionColumnPage
-   * @param numerOfRows
+   * @param numberOfRows
    * @return BitSet.
    */
-  private BitSet setFilterdIndexToBitSetWithColumnIndex(
-      DimensionColumnPage dimensionColumnPage, int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
+  private BitSet setFilteredIndexToBitSetWithColumnIndex(
+      DimensionColumnPage dimensionColumnPage, int numberOfRows) {
+    BitSet bitSet = new BitSet(numberOfRows);
     int start = 0;
     int last = 0;
     int startIndex = 0;
     byte[][] filterValues = this.filterRangeValues;
     for (int i = 0; i < filterValues.length; i++) {
       start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex, numerOfRows - 1,
+          .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex, numberOfRows - 1,
               filterValues[i], false);
       if (start < 0) {
         start = -(start + 1);
-        if (start == numerOfRows) {
+        if (start == numberOfRows) {
           start = start - 1;
         }
         // Method will compare the tentative index value after binary search, this tentative
@@ -505,12 +505,12 @@
         }
       }
       last = start;
-      for (int j = start; j < numerOfRows; j++) {
+      for (int j = start; j < numberOfRows; j++) {
         bitSet.set(dimensionColumnPage.getInvertedIndex(j));
         last++;
       }
       startIndex = last;
-      if (startIndex >= numerOfRows) {
+      if (startIndex >= numberOfRows) {
         break;
       }
     }
@@ -521,15 +521,15 @@
    * Method will scan the block and finds the range start index from which all
    * members will be considered for applying range filters. this method will
    * be called if the column is sorted default so column index
-   * mapping will be present for accesing the members from the block.
+   * mapping will be present for accessing the members from the block.
    *
    * @param dimensionColumnPage
-   * @param numerOfRows
+   * @param numberOfRows
    * @return BitSet.
    */
-  private BitSet setFilterdIndexToBitSet(DimensionColumnPage dimensionColumnPage,
-      int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
+  private BitSet setFilteredIndexToBitSet(DimensionColumnPage dimensionColumnPage,
+      int numberOfRows) {
+    BitSet bitSet = new BitSet(numberOfRows);
     byte[][] filterValues = this.filterRangeValues;
     // binary search can only be applied if column is sorted
     if (isNaturalSorted && dimensionColumnPage.isExplicitSorted()) {
@@ -539,10 +539,10 @@
       for (int k = 0; k < filterValues.length; k++) {
         start = CarbonUtil
             .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex,
-                numerOfRows - 1, filterValues[k], false);
+                numberOfRows - 1, filterValues[k], false);
         if (start < 0) {
           start = -(start + 1);
-          if (start == numerOfRows) {
+          if (start == numberOfRows) {
             start = start - 1;
           }
           // Method will compare the tentative index value after binary search, this tentative
@@ -554,18 +554,18 @@
         }
 
         last = start;
-        for (int j = start; j < numerOfRows; j++) {
+        for (int j = start; j < numberOfRows; j++) {
           bitSet.set(j);
           last++;
         }
         startIndex = last;
-        if (startIndex >= numerOfRows) {
+        if (startIndex >= numberOfRows) {
           break;
         }
       }
     } else {
       for (int k = 0; k < filterValues.length; k++) {
-        for (int i = 0; i < numerOfRows; i++) {
+        for (int i = 0; i < numberOfRows; i++) {
           if (ByteUtil.compare(dimensionColumnPage.getChunkData(i), filterValues[k]) >= 0) {
             bitSet.set(i);
           }
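
getFilteredIndexesForMeasures, renamed throughout these files to take numberOfRows, walks every non-null row and keeps those whose value satisfies the range comparator. A simplified sketch of the greater-than-equal case on a primitive page (the BitSet stands in for the page's null bitmap; this is not the ColumnPage API):

    import java.util.BitSet;
    import java.util.Comparator;

    public class MeasureRangeSketch {

      // Sets a bit for every non-null row whose value is >= the filter value,
      // mirroring getFilteredIndexesForMeasures for the greater-than-equal case.
      static BitSet greaterThanEqual(double[] page, BitSet nullBits, double filterValue,
          Comparator<Double> comparator) {
        BitSet bits = new BitSet(page.length);
        for (int startIndex = 0; startIndex < page.length; startIndex++) {
          if (!nullBits.get(startIndex)
              && comparator.compare(page[startIndex], filterValue) >= 0) {
            bits.set(startIndex);
          }
        }
        return bits;
      }

      public static void main(String[] args) {
        double[] page = {1.5, 3.0, Double.NaN, 7.25};
        BitSet nulls = new BitSet(page.length);
        nulls.set(2);  // row 2 is null and must never match
        System.out.println(
            greaterThanEqual(page, nulls, 3.0, Comparator.naturalOrder()));  // {1, 3}
      }
    }
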
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtThanFiterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGreaterThanFilterExecutorImpl.java
similarity index 91%
rename from core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtThanFiterExecuterImpl.java
rename to core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGreaterThanFilterExecutorImpl.java
index 7cef5f4..e06414e 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtThanFiterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGreaterThanFilterExecutorImpl.java
@@ -46,22 +46,22 @@
 import org.apache.carbondata.core.util.comparator.Comparator;
 import org.apache.carbondata.core.util.comparator.SerializableComparator;
 
-public class RowLevelRangeGrtThanFiterExecuterImpl extends RowLevelFilterExecuterImpl {
+public class RowLevelRangeGreaterThanFilterExecutorImpl extends RowLevelFilterExecutorImpl {
   private byte[][] filterRangeValues;
   private Object[] msrFilterRangeValues;
   private SerializableComparator comparator;
 
-
   /**
    * flag to check whether default values is present in the filter value list
    */
   private boolean isDefaultValuePresentInFilter;
 
-  RowLevelRangeGrtThanFiterExecuterImpl(List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
-      List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList, Expression exp,
+  RowLevelRangeGreaterThanFilterExecutorImpl(
+      List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
+      List<MeasureColumnResolvedFilterInfo> msrColEvaluatorInfoList, Expression exp,
       AbsoluteTableIdentifier tableIdentifier, byte[][] filterRangeValues,
       Object[] msrFilterRangeValues, SegmentProperties segmentProperties) {
-    super(dimColEvaluatorInfoList, msrColEvalutorInfoList, exp, tableIdentifier, segmentProperties,
+    super(dimColEvaluatorInfoList, msrColEvaluatorInfoList, exp, tableIdentifier, segmentProperties,
         null);
     this.filterRangeValues = filterRangeValues;
     this.msrFilterRangeValues = msrFilterRangeValues;
@@ -216,7 +216,7 @@
     boolean isScanRequired = false;
     for (int k = 0; k < filterValues.length; k++) {
       // filter value should be in range of max and min value i.e
-      // max>filtervalue>min
+      // max>filterValue>min
       // so filter-max should be negative
       int maxCompare = ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], blockMaxValue);
       // if any filter value is in range than this block needs to be
@@ -239,7 +239,7 @@
         return true;
       }
       // filter value should be in range of max and min value i.e
-      // max>filtervalue>min
+      // max>filterValue>min
       // so filter-max should be negative
       Object data =
           DataTypeUtil.getDataBasedOnDataTypeForNoDictionaryColumn(filterValues[k], dataType);
@@ -291,7 +291,7 @@
       DimensionRawColumnChunk rawColumnChunk =
           rawBlockletColumnChunks.getDimensionRawColumnChunks()[chunkIndex];
       BitSetGroup bitSetGroup = new BitSetGroup(rawColumnChunk.getPagesCount());
-      FilterExecuter filterExecuter = null;
+      FilterExecutor filterExecutor = null;
       boolean isExclude = false;
       for (int i = 0; i < rawColumnChunk.getPagesCount(); i++) {
         if (rawColumnChunk.getMaxValues() != null) {
@@ -306,20 +306,20 @@
               BitSet bitSet = null;
               DimensionColumnPage dimensionColumnPage = rawColumnChunk.decodeColumnPage(i);
               if (null != rawColumnChunk.getLocalDictionary()) {
-                if (null == filterExecuter) {
-                  filterExecuter = FilterUtil
+                if (null == filterExecutor) {
+                  filterExecutor = FilterUtil
                       .getFilterExecutorForRangeFilters(rawColumnChunk, exp, isNaturalSorted);
-                  if (filterExecuter instanceof ExcludeFilterExecuterImpl) {
+                  if (filterExecutor instanceof ExcludeFilterExecutorImpl) {
                     isExclude = true;
                   }
                 }
                 if (!isExclude) {
-                  bitSet = ((IncludeFilterExecuterImpl) filterExecuter)
+                  bitSet = ((IncludeFilterExecutorImpl) filterExecutor)
                       .getFilteredIndexes(dimensionColumnPage,
                           rawColumnChunk.getRowCount()[i], useBitsetPipeLine,
                           rawBlockletColumnChunks.getBitSetGroup(), i);
                 } else {
-                  bitSet = ((ExcludeFilterExecuterImpl) filterExecuter)
+                  bitSet = ((ExcludeFilterExecutorImpl) filterExecutor)
                       .getFilteredIndexes(dimensionColumnPage,
                           rawColumnChunk.getRowCount()[i], useBitsetPipeLine,
                           rawBlockletColumnChunks.getBitSetGroup(), i);
@@ -413,8 +413,8 @@
   }
 
   private BitSet getFilteredIndexesForMeasures(ColumnPage columnPage,
-      int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
+      int numberOfRows) {
+    BitSet bitSet = new BitSet(numberOfRows);
     Object[] filterValues = this.msrFilterRangeValues;
     DataType msrType = msrColEvalutorInfoList.get(0).getType();
     SerializableComparator comparator = Comparator.getComparatorByDataTypeForMeasure(msrType);
@@ -426,7 +426,7 @@
         }
         continue;
       }
-      for (int startIndex = 0; startIndex < numerOfRows; startIndex++) {
+      for (int startIndex = 0; startIndex < numberOfRows; startIndex++) {
         if (!nullBitSet.get(startIndex)) {
           Object msrValue = DataTypeUtil
               .getMeasureObjectBasedOnDataType(columnPage, startIndex,
@@ -443,12 +443,12 @@
   }
 
   private BitSet getFilteredIndexes(DimensionColumnPage dimensionColumnPage,
-      int numerOfRows) {
+      int numberOfRows) {
     BitSet bitSet = null;
     if (dimensionColumnPage.isExplicitSorted()) {
-      bitSet = setFilterdIndexToBitSetWithColumnIndex(dimensionColumnPage, numerOfRows);
+      bitSet = setFilteredIndexToBitSetWithColumnIndex(dimensionColumnPage, numberOfRows);
     } else {
-      bitSet = setFilterdIndexToBitSet(dimensionColumnPage, numerOfRows);
+      bitSet = setFilteredIndexToBitSet(dimensionColumnPage, numberOfRows);
     }
     byte[] defaultValue = null;
     if (dimColEvaluatorInfoList.get(0).getDimension().getDataType() == DataTypes.STRING) {
@@ -459,7 +459,7 @@
     } else if (!dimensionColumnPage.isAdaptiveEncoded()) {
       defaultValue = CarbonCommonConstants.EMPTY_BYTE_ARRAY;
     }
-    if (dimensionColumnPage.isNoDicitionaryColumn() ||
+    if (dimensionColumnPage.isNoDictionaryColumn() ||
         dimColEvaluatorInfoList.get(0).getDimension().getDataType() == DataTypes.DATE) {
       FilterUtil.removeNullValues(dimensionColumnPage, bitSet, defaultValue);
     }
@@ -470,27 +470,27 @@
    * Method will scan the block and finds the range start index from which all members
    * will be considered for applying range filters. this method will be called if the
    * column is not supported by default so column index mapping  will be present for
-   * accesing the members from the block.
+   * accessing the members from the block.
    *
    * @param dimensionColumnPage
-   * @param numerOfRows
+   * @param numberOfRows
    * @return BitSet.
    */
-  private BitSet setFilterdIndexToBitSetWithColumnIndex(
-      DimensionColumnPage dimensionColumnPage, int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
+  private BitSet setFilteredIndexToBitSetWithColumnIndex(
+      DimensionColumnPage dimensionColumnPage, int numberOfRows) {
+    BitSet bitSet = new BitSet(numberOfRows);
     int start = 0;
     int last = 0;
     int startIndex = 0;
     byte[][] filterValues = this.filterRangeValues;
     for (int i = 0; i < filterValues.length; i++) {
       start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex, numerOfRows - 1,
+          .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex, numberOfRows - 1,
               filterValues[i], true);
       if (start >= 0) {
         start = CarbonUtil
             .nextGreaterValueToTarget(start, dimensionColumnPage, filterValues[i],
-                numerOfRows);
+                numberOfRows);
       }
       // Logic will handle the case where the range filter member is not present in block
       // in this case the binary search will return the index from where the bit sets will be
@@ -498,7 +498,7 @@
       // from the next element which is greater than filter member.
       if (start < 0) {
         start = -(start + 1);
-        if (start == numerOfRows) {
+        if (start == numberOfRows) {
           start = start - 1;
         }
         // Method will compare the tentative index value after binary search, this tentative
@@ -512,12 +512,12 @@
       }
 
       last = start;
-      for (int j = start; j < numerOfRows; j++) {
+      for (int j = start; j < numberOfRows; j++) {
         bitSet.set(dimensionColumnPage.getInvertedIndex(j));
         last++;
       }
       startIndex = last;
-      if (startIndex >= numerOfRows) {
+      if (startIndex >= numberOfRows) {
         break;
       }
     }
@@ -529,15 +529,15 @@
    * Method will scan the block and finds the range start index from which all
    * members will be considered for applying range filters. this method will
    * be called if the column is sorted default so column index
-   * mapping will be present for accesing the members from the block.
+   * mapping will be present for accessing the members from the block.
    *
    * @param dimensionColumnPage
-   * @param numerOfRows
+   * @param numberOfRows
    * @return BitSet.
    */
-  private BitSet setFilterdIndexToBitSet(DimensionColumnPage dimensionColumnPage,
-      int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
+  private BitSet setFilteredIndexToBitSet(DimensionColumnPage dimensionColumnPage,
+      int numberOfRows) {
+    BitSet bitSet = new BitSet(numberOfRows);
     byte[][] filterValues = this.filterRangeValues;
     // binary search can only be applied if column is sorted
     if (isNaturalSorted && dimensionColumnPage.isExplicitSorted()) {
@@ -547,15 +547,15 @@
       for (int k = 0; k < filterValues.length; k++) {
         start = CarbonUtil
             .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex,
-                numerOfRows - 1, filterValues[k], true);
+                numberOfRows - 1, filterValues[k], true);
         if (start >= 0) {
           start = CarbonUtil
               .nextGreaterValueToTarget(start, dimensionColumnPage, filterValues[k],
-                  numerOfRows);
+                  numberOfRows);
         }
         if (start < 0) {
           start = -(start + 1);
-          if (start == numerOfRows) {
+          if (start == numberOfRows) {
             start = start - 1;
           }
           // Method will compare the tentative index value after binary search, this tentative
@@ -566,18 +566,18 @@
           }
         }
         last = start;
-        for (int j = start; j < numerOfRows; j++) {
+        for (int j = start; j < numberOfRows; j++) {
           bitSet.set(j);
           last++;
         }
         startIndex = last;
-        if (startIndex >= numerOfRows) {
+        if (startIndex >= numberOfRows) {
           break;
         }
       }
     } else {
       for (int k = 0; k < filterValues.length; k++) {
-        for (int i = 0; i < numerOfRows; i++) {
+        for (int i = 0; i < numberOfRows; i++) {
           if (ByteUtil.compare(dimensionColumnPage.getChunkData(i), filterValues[k]) > 0) {
             bitSet.set(i);
           }
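
The strict greater-than executor differs from the greater-than-equal one mainly in the nextGreaterValueToTarget step: when the boundary value itself is found, the start index must be advanced past all of its duplicates before bits are set. A sketch of that advance over a sorted array (illustrative signature, not CarbonUtil's):

    import java.util.Arrays;

    public class NextGreaterSketch {

      // Advances start past every row equal to target, so a strict greater-than
      // range begins at the first strictly larger value.
      static int nextGreaterValueToTarget(int start, int[] sortedRows, int target) {
        while (start < sortedRows.length && sortedRows[start] == target) {
          start++;
        }
        return start;
      }

      public static void main(String[] args) {
        int[] rows = {2, 4, 4, 4, 7, 9};
        int start = Arrays.binarySearch(rows, 4);   // some index of a 4
        while (start > 0 && rows[start - 1] == 4) { // rewind to the first occurrence
          start--;
        }
        System.out.println(nextGreaterValueToTarget(start, rows, 4));  // 4 -> row value 7
      }
    }
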
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecutorImpl.java
similarity index 92%
rename from core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java
rename to core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecutorImpl.java
index 23ff13a..5dd50a0 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecutorImpl.java
@@ -46,7 +46,7 @@
 import org.apache.carbondata.core.util.comparator.Comparator;
 import org.apache.carbondata.core.util.comparator.SerializableComparator;
 
-public class RowLevelRangeLessThanEqualFilterExecuterImpl extends RowLevelFilterExecuterImpl {
+public class RowLevelRangeLessThanEqualFilterExecutorImpl extends RowLevelFilterExecutorImpl {
   protected byte[][] filterRangeValues;
   protected Object[] msrFilterRangeValues;
   protected SerializableComparator comparator;
@@ -55,7 +55,7 @@
    * flag to check whether default values is present in the filter value list
    */
   private boolean isDefaultValuePresentInFilter;
-  public RowLevelRangeLessThanEqualFilterExecuterImpl(
+  public RowLevelRangeLessThanEqualFilterExecutorImpl(
       List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
       List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList, Expression exp,
       AbsoluteTableIdentifier tableIdentifier, byte[][] filterRangeValues,
@@ -160,7 +160,7 @@
       int minCompare = ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], blockMinValue);
 
       // if any filter applied is not in range of min and max of block
-      // then since its a less than equal to fiter validate whether the block
+      // then since it's a less than equal to filter, validate whether the block
       // min range is less than equal to applied filter member
       if (minCompare >= 0) {
         isScanRequired = true;
@@ -180,7 +180,7 @@
         return true;
       }
       // filter value should be in range of max and min value i.e
-      // max>filtervalue>min
+      // max>filterValue>min
       // so filter-max should be negative
       Object data =
           DataTypeUtil.getDataBasedOnDataTypeForNoDictionaryColumn(filterValues[k], dataType);
@@ -233,7 +233,7 @@
       DimensionRawColumnChunk rawColumnChunk =
           rawBlockletColumnChunks.getDimensionRawColumnChunks()[chunkIndex];
       BitSetGroup bitSetGroup = new BitSetGroup(rawColumnChunk.getPagesCount());
-      FilterExecuter filterExecuter = null;
+      FilterExecutor filterExecutor = null;
       boolean isExclude = false;
       for (int i = 0; i < rawColumnChunk.getPagesCount(); i++) {
         if (rawColumnChunk.getMinValues() != null) {
@@ -241,20 +241,20 @@
             BitSet bitSet;
             DimensionColumnPage dimensionColumnPage = rawColumnChunk.decodeColumnPage(i);
             if (null != rawColumnChunk.getLocalDictionary()) {
-              if (null == filterExecuter) {
-                filterExecuter = FilterUtil
+              if (null == filterExecutor) {
+                filterExecutor = FilterUtil
                     .getFilterExecutorForRangeFilters(rawColumnChunk, exp, isNaturalSorted);
-                if (filterExecuter instanceof ExcludeFilterExecuterImpl) {
+                if (filterExecutor instanceof ExcludeFilterExecutorImpl) {
                   isExclude = true;
                 }
               }
               if (!isExclude) {
-                bitSet = ((IncludeFilterExecuterImpl) filterExecuter)
+                bitSet = ((IncludeFilterExecutorImpl) filterExecutor)
                     .getFilteredIndexes(dimensionColumnPage,
                         rawColumnChunk.getRowCount()[i], useBitsetPipeLine,
                         rawBlockletColumnChunks.getBitSetGroup(), i);
               } else {
-                bitSet = ((ExcludeFilterExecuterImpl) filterExecuter)
+                bitSet = ((ExcludeFilterExecutorImpl) filterExecutor)
                     .getFilteredIndexes(dimensionColumnPage,
                         rawColumnChunk.getRowCount()[i], useBitsetPipeLine,
                         rawBlockletColumnChunks.getBitSetGroup(), i);
@@ -394,8 +394,8 @@
   }
 
   private BitSet getFilteredIndexesForMeasures(ColumnPage columnPage,
-      int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
+      int numberOfRows) {
+    BitSet bitSet = new BitSet(numberOfRows);
     Object[] filterValues = this.msrFilterRangeValues;
     DataType msrType = msrColEvalutorInfoList.get(0).getType();
     SerializableComparator comparator = Comparator.getComparatorByDataTypeForMeasure(msrType);
@@ -407,7 +407,7 @@
         }
         continue;
       }
-      for (int startIndex = 0; startIndex < numerOfRows; startIndex++) {
+      for (int startIndex = 0; startIndex < numberOfRows; startIndex++) {
         if (!nullBitSet.get(startIndex)) {
           Object msrValue = DataTypeUtil
               .getMeasureObjectBasedOnDataType(columnPage, startIndex,
@@ -424,7 +424,7 @@
   }
 
   private BitSet getFilteredIndexes(DimensionColumnPage dimensionColumnPage,
-      int numerOfRows) {
+      int numberOfRows) {
     byte[] defaultValue = null;
     if (dimColEvaluatorInfoList.get(0).getDimension().getDataType() == DataTypes.DATE) {
       defaultValue = FilterUtil
@@ -434,16 +434,16 @@
     }
     BitSet bitSet = null;
     if (dimensionColumnPage.isExplicitSorted()) {
-      bitSet = setFilterdIndexToBitSetWithColumnIndex(dimensionColumnPage, numerOfRows,
+      bitSet = setFilteredIndexToBitSetWithColumnIndex(dimensionColumnPage, numberOfRows,
           dimensionColumnPage.isAdaptiveEncoded() ? null : defaultValue);
     } else {
-      bitSet = setFilterdIndexToBitSet(dimensionColumnPage, numerOfRows,
+      bitSet = setFilteredIndexToBitSet(dimensionColumnPage, numberOfRows,
           dimensionColumnPage.isAdaptiveEncoded() ? null : defaultValue);
     }
     if (dimColEvaluatorInfoList.get(0).getDimension().getDataType() == DataTypes.STRING) {
       defaultValue = CarbonCommonConstants.MEMBER_DEFAULT_VAL_ARRAY;
     }
-    if (dimensionColumnPage.isNoDicitionaryColumn() ||
+    if (dimensionColumnPage.isNoDictionaryColumn() ||
         dimColEvaluatorInfoList.get(0).getDimension().getDataType() == DataTypes.DATE) {
       FilterUtil.removeNullValues(dimensionColumnPage, bitSet, defaultValue);
     }
@@ -454,16 +454,16 @@
    * Method will scan the block and finds the range start index from which all members
    * will be considered for applying range filters. this method will be called if the
    * column is not supported by default so column index mapping  will be present for
-   * accesing the members from the block.
+   * accessing the members from the block.
    *
    * @param dimensionColumnPage
-   * @param numerOfRows
+   * @param numberOfRows
    * @return BitSet.
    */
-  private BitSet setFilterdIndexToBitSetWithColumnIndex(
-      DimensionColumnPage dimensionColumnPage, int numerOfRows,
+  private BitSet setFilteredIndexToBitSetWithColumnIndex(
+      DimensionColumnPage dimensionColumnPage, int numberOfRows,
       byte[] defaultValue) {
-    BitSet bitSet = new BitSet(numerOfRows);
+    BitSet bitSet = new BitSet(numberOfRows);
     int start = 0;
     int last = 0;
     int skip = 0;
@@ -472,12 +472,12 @@
     //find the number of default values to skip the null value in case of direct dictionary
     if (null != defaultValue) {
       start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex, numerOfRows - 1,
+          .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex, numberOfRows - 1,
               defaultValue, true);
       if (start < 0) {
         skip = -(start + 1);
         // end of block
-        if (skip == numerOfRows) {
+        if (skip == numberOfRows) {
           return bitSet;
         }
       } else {
@@ -489,11 +489,11 @@
     }
     for (int i = 0; i < filterValues.length; i++) {
       start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex, numerOfRows - 1,
+          .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex, numberOfRows - 1,
               filterValues[i], true);
       if (start < 0) {
         start = -(start + 1);
-        if (start >= numerOfRows) {
+        if (start >= numberOfRows) {
           start = start - 1;
         }
         // When negative value of start is returned from getFirstIndexUsingBinarySearch the Start
@@ -522,16 +522,16 @@
    * Method will scan the block and finds the range start index from which all
    * members will be considered for applying range filters. this method will
    * be called if the column is sorted default so column index
-   * mapping will be present for accesing the members from the block.
+   * mapping will be present for accessing the members from the block.
    *
    * @param dimensionColumnPage
-   * @param numerOfRows
+   * @param numberOfRows
    * @param defaultValue
    * @return BitSet.
    */
-  private BitSet setFilterdIndexToBitSet(DimensionColumnPage dimensionColumnPage,
-      int numerOfRows, byte[] defaultValue) {
-    BitSet bitSet = new BitSet(numerOfRows);
+  private BitSet setFilteredIndexToBitSet(DimensionColumnPage dimensionColumnPage,
+      int numberOfRows, byte[] defaultValue) {
+    BitSet bitSet = new BitSet(numberOfRows);
     byte[][] filterValues = this.filterRangeValues;
     // binary search can only be applied if column is sorted
     if (isNaturalSorted && dimensionColumnPage.isExplicitSorted()) {
@@ -543,11 +543,11 @@
       if (null != defaultValue) {
         start = CarbonUtil
             .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex,
-                numerOfRows - 1, defaultValue, true);
+                numberOfRows - 1, defaultValue, true);
         if (start < 0) {
           skip = -(start + 1);
           // end of block
-          if (skip == numerOfRows) {
+          if (skip == numberOfRows) {
             return bitSet;
           }
         } else {
@@ -560,10 +560,10 @@
       for (int k = 0; k < filterValues.length; k++) {
         start = CarbonUtil
             .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex,
-                numerOfRows - 1, filterValues[k], true);
+                numberOfRows - 1, filterValues[k], true);
         if (start < 0) {
           start = -(start + 1);
-          if (start >= numerOfRows) {
+          if (start >= numberOfRows) {
             start = start - 1;
           }
           // When negative value of start is returned from getFirstIndexUsingBinarySearch the Start
@@ -585,7 +585,7 @@
       }
     } else {
       for (int k = 0; k < filterValues.length; k++) {
-        for (int i = 0; i < numerOfRows; i++) {
+        for (int i = 0; i < numberOfRows; i++) {
           if (ByteUtil.compare(dimensionColumnPage.getChunkData(i), filterValues[k]) <= 0) {
             bitSet.set(i);
           }
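
Both setFilteredIndexToBitSet variants in the less-than-equal executor first measure the run of default (null) values at the head of a sorted block and skip it, so nulls never match a range filter. A simplified sketch of that skip-then-mark flow (duplicate filter values at the boundary are not fully resolved here):

    import java.util.Arrays;
    import java.util.BitSet;

    public class NullSkipSketch {

      // Counts the leading default-value (null) rows of a sorted block so the
      // less-than-equal range starts after them, as in setFilteredIndexToBitSet.
      static BitSet lessThanEqual(int[] sortedRows, int defaultValue, int filterValue) {
        BitSet bits = new BitSet(sortedRows.length);
        int skip = 0;
        while (skip < sortedRows.length && sortedRows[skip] == defaultValue) {
          skip++;                                   // nulls sort first; step over them
        }
        if (skip == sortedRows.length) {
          return bits;                              // whole block is null: nothing to set
        }
        int end = Arrays.binarySearch(sortedRows, skip, sortedRows.length, filterValue);
        if (end < 0) {
          end = -(end + 1) - 1;                     // last row <= filterValue
        }
        for (int j = skip; j <= end; j++) {
          bits.set(j);
        }
        return bits;
      }

      public static void main(String[] args) {
        int[] rows = {-1, -1, 3, 5, 8};             // -1 stands for the null default
        System.out.println(lessThanEqual(rows, -1, 5));  // {2, 3}
      }
    }
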
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFilterExecutorImpl.java
similarity index 91%
rename from core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFilterExecuterImpl.java
rename to core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFilterExecutorImpl.java
index 054f04d..0c29675 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFilterExecutorImpl.java
@@ -46,7 +46,7 @@
 import org.apache.carbondata.core.util.comparator.Comparator;
 import org.apache.carbondata.core.util.comparator.SerializableComparator;
 
-public class RowLevelRangeLessThanFilterExecuterImpl extends RowLevelFilterExecuterImpl {
+public class RowLevelRangeLessThanFilterExecutorImpl extends RowLevelFilterExecutorImpl {
   private byte[][] filterRangeValues;
   private Object[] msrFilterRangeValues;
   private SerializableComparator comparator;
@@ -55,16 +55,16 @@
    * flag to check whether default values is present in the filter value list
    */
   private boolean isDefaultValuePresentInFilter;
-  public RowLevelRangeLessThanFilterExecuterImpl(
+  public RowLevelRangeLessThanFilterExecutorImpl(
       List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
-      List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList, Expression exp,
+      List<MeasureColumnResolvedFilterInfo> msrColEvaluatorInfoList, Expression exp,
       AbsoluteTableIdentifier tableIdentifier, byte[][] filterRangeValues,
       Object[] msrFilterRangeValues, SegmentProperties segmentProperties) {
-    super(dimColEvaluatorInfoList, msrColEvalutorInfoList, exp, tableIdentifier, segmentProperties,
+    super(dimColEvaluatorInfoList, msrColEvaluatorInfoList, exp, tableIdentifier, segmentProperties,
         null);
     this.filterRangeValues = filterRangeValues;
     this.msrFilterRangeValues = msrFilterRangeValues;
-    if (!msrColEvalutorInfoList.isEmpty()) {
+    if (!msrColEvaluatorInfoList.isEmpty()) {
       CarbonMeasure measure = this.msrColEvalutorInfoList.get(0).getMeasure();
       comparator = Comparator.getComparatorByDataTypeForMeasure(measure.getDataType());
     }
@@ -160,7 +160,7 @@
       int minCompare = ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], blockMinValue);
 
       // if any filter applied is not in range of min and max of block
-      // then since its a less than equal to fiter validate whether the block
+      // then since it's a less than filter, validate whether the block
       // min range is less than equal to applied filter member
       if (minCompare > 0) {
         isScanRequired = true;
@@ -230,7 +230,7 @@
       DimensionRawColumnChunk rawColumnChunk =
           rawBlockletColumnChunks.getDimensionRawColumnChunks()[chunkIndex];
       BitSetGroup bitSetGroup = new BitSetGroup(rawColumnChunk.getPagesCount());
-      FilterExecuter filterExecuter = null;
+      FilterExecutor filterExecutor = null;
       boolean isExclude = false;
       for (int i = 0; i < rawColumnChunk.getPagesCount(); i++) {
         if (rawColumnChunk.getMinValues() != null) {
@@ -238,20 +238,20 @@
             BitSet bitSet;
             DimensionColumnPage dimensionColumnPage = rawColumnChunk.decodeColumnPage(i);
             if (null != rawColumnChunk.getLocalDictionary()) {
-              if (null == filterExecuter) {
-                filterExecuter = FilterUtil
+              if (null == filterExecutor) {
+                filterExecutor = FilterUtil
                     .getFilterExecutorForRangeFilters(rawColumnChunk, exp, isNaturalSorted);
-                if (filterExecuter instanceof ExcludeFilterExecuterImpl) {
+                if (filterExecutor instanceof ExcludeFilterExecutorImpl) {
                   isExclude = true;
                 }
               }
               if (!isExclude) {
-                bitSet = ((IncludeFilterExecuterImpl) filterExecuter)
+                bitSet = ((IncludeFilterExecutorImpl) filterExecutor)
                     .getFilteredIndexes(dimensionColumnPage,
                         rawColumnChunk.getRowCount()[i], useBitsetPipeLine,
                         rawBlockletColumnChunks.getBitSetGroup(), i);
               } else {
-                bitSet = ((ExcludeFilterExecuterImpl) filterExecuter)
+                bitSet = ((ExcludeFilterExecutorImpl) filterExecutor)
                     .getFilteredIndexes(dimensionColumnPage,
                         rawColumnChunk.getRowCount()[i], useBitsetPipeLine,
                         rawBlockletColumnChunks.getBitSetGroup(), i);
@@ -389,8 +389,8 @@
   }
 
   private BitSet getFilteredIndexesForMeasures(ColumnPage columnPage,
-      int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
+      int numberOfRows) {
+    BitSet bitSet = new BitSet(numberOfRows);
     Object[] filterValues = this.msrFilterRangeValues;
     DataType msrType = msrColEvalutorInfoList.get(0).getType();
     SerializableComparator comparator = Comparator.getComparatorByDataTypeForMeasure(msrType);
@@ -402,7 +402,7 @@
         }
         continue;
       }
-      for (int startIndex = 0; startIndex < numerOfRows; startIndex++) {
+      for (int startIndex = 0; startIndex < numberOfRows; startIndex++) {
         if (!nullBitSet.get(startIndex)) {
           Object msrValue = DataTypeUtil
               .getMeasureObjectBasedOnDataType(columnPage, startIndex,
@@ -419,7 +419,7 @@
   }
 
   private BitSet getFilteredIndexes(DimensionColumnPage dimensionColumnPage,
-      int numerOfRows) {
+      int numberOfRows) {
     byte[] defaultValue = null;
     if (dimColEvaluatorInfoList.get(0).getDimension().getDataType() == DataTypes.DATE) {
       defaultValue = FilterUtil
@@ -429,16 +429,16 @@
     }
     BitSet bitSet = null;
     if (dimensionColumnPage.isExplicitSorted()) {
-      bitSet = setFilterdIndexToBitSetWithColumnIndex(dimensionColumnPage, numerOfRows,
+      bitSet = setFilteredIndexToBitSetWithColumnIndex(dimensionColumnPage, numberOfRows,
           dimensionColumnPage.isAdaptiveEncoded() ? null : defaultValue);
     } else {
-      bitSet = setFilterdIndexToBitSet(dimensionColumnPage, numerOfRows,
+      bitSet = setFilteredIndexToBitSet(dimensionColumnPage, numberOfRows,
           dimensionColumnPage.isAdaptiveEncoded() ? null : defaultValue);
     }
     if (dimColEvaluatorInfoList.get(0).getDimension().getDataType() == DataTypes.STRING) {
       defaultValue = CarbonCommonConstants.MEMBER_DEFAULT_VAL_ARRAY;
     }
-    if (dimensionColumnPage.isNoDicitionaryColumn() ||
+    if (dimensionColumnPage.isNoDictionaryColumn() ||
         dimColEvaluatorInfoList.get(0).getDimension().getDataType() == DataTypes.DATE) {
       FilterUtil.removeNullValues(dimensionColumnPage, bitSet, defaultValue);
     }
@@ -449,16 +449,16 @@
    * Method will scan the block and finds the range start index from which all members
    * will be considered for applying range filters. this method will be called if the
    * column is not supported by default so column index mapping  will be present for
-   * accesing the members from the block.
+   * accessing the members from the block.
    *
    * @param dimensionColumnPage
-   * @param numerOfRows
+   * @param numberOfRows
    * @return BitSet.
    */
-  private BitSet setFilterdIndexToBitSetWithColumnIndex(
-      DimensionColumnPage dimensionColumnPage, int numerOfRows,
+  private BitSet setFilteredIndexToBitSetWithColumnIndex(
+      DimensionColumnPage dimensionColumnPage, int numberOfRows,
       byte[] defaultValue) {
-    BitSet bitSet = new BitSet(numerOfRows);
+    BitSet bitSet = new BitSet(numberOfRows);
     int start = 0;
     int last = 0;
     int startIndex = 0;
@@ -468,12 +468,12 @@
     //find the number of default values to skip the null value in case of direct dictionary
     if (null != defaultValue) {
       start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex, numerOfRows - 1,
+          .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex, numberOfRows - 1,
               defaultValue, true);
       if (start < 0) {
         skip = -(start + 1);
         // end of block
-        if (skip == numerOfRows) {
+        if (skip == numberOfRows) {
           return bitSet;
         }
       } else {
@@ -486,7 +486,7 @@
 
     for (int i = 0; i < filterValues.length; i++) {
       start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex, numerOfRows - 1,
+          .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex, numberOfRows - 1,
               filterValues[i], false);
       if (start >= 0) {
         // Logic will handle the case where the range filter member is not present in block
@@ -498,7 +498,7 @@
       }
       if (start < 0) {
         start = -(start + 1);
-        if (start >= numerOfRows) {
+        if (start >= numberOfRows) {
           start = start - 1;
         }
         // When negative value of start is returned from getFirstIndexUsingBinarySearch the Start
@@ -527,15 +527,15 @@
    * Method will scan the block and finds the range start index from which all
    * members will be considered for applying range filters. this method will
    * be called if the column is sorted default so column index
-   * mapping will be present for accesing the members from the block.
+   * mapping will be present for accessing the members from the block.
    *
    * @param dimensionColumnPage
-   * @param numerOfRows
+   * @param numberOfRows
    * @return BitSet.
    */
-  private BitSet setFilterdIndexToBitSet(DimensionColumnPage dimensionColumnPage,
-      int numerOfRows, byte[] defaultValue) {
-    BitSet bitSet = new BitSet(numerOfRows);
+  private BitSet setFilteredIndexToBitSet(DimensionColumnPage dimensionColumnPage,
+      int numberOfRows, byte[] defaultValue) {
+    BitSet bitSet = new BitSet(numberOfRows);
     byte[][] filterValues = this.filterRangeValues;
     // binary search can only be applied if column is sorted
     if (isNaturalSorted && dimensionColumnPage.isExplicitSorted()) {
@@ -547,11 +547,11 @@
       if (null != defaultValue) {
         start = CarbonUtil
             .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex,
-                numerOfRows - 1, defaultValue, true);
+                numberOfRows - 1, defaultValue, true);
         if (start < 0) {
           skip = -(start + 1);
           // end of block
-          if (skip == numerOfRows) {
+          if (skip == numberOfRows) {
             return bitSet;
           }
         } else {
@@ -564,7 +564,7 @@
       for (int k = 0; k < filterValues.length; k++) {
         start = CarbonUtil
             .getFirstIndexUsingBinarySearch(dimensionColumnPage, startIndex,
-                numerOfRows - 1, filterValues[k], false);
+                numberOfRows - 1, filterValues[k], false);
         if (start >= 0) {
           start =
               CarbonUtil.nextLesserValueToTarget(start, dimensionColumnPage, filterValues[k]);
@@ -572,8 +572,8 @@
         if (start < 0) {
           start = -(start + 1);
 
-          if (start >= numerOfRows) {
-            start = numerOfRows - 1;
+          if (start >= numberOfRows) {
+            start = numberOfRows - 1;
           }
           // When negative value of start is returned from getFirstIndexUsingBinarySearch the Start
           // will be pointing to the next consecutive position. So compare it again and point to the
@@ -594,7 +594,7 @@
       }
     } else {
       for (int k = 0; k < filterValues.length; k++) {
-        for (int i = 0; i < numerOfRows; i++) {
+        for (int i = 0; i < numberOfRows; i++) {
           if (ByteUtil.compare(dimensionColumnPage.getChunkData(i), filterValues[k]) < 0) {
             bitSet.set(i);
           }
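
When a page is explicitly sorted, the loops above set bits through getInvertedIndex(j), translating positions in the sorted view back to original row ids. A small sketch of that translation:

    import java.util.BitSet;

    public class InvertedIndexSketch {

      // The sorted view is scanned, but bits are set at the original row ids via
      // the inverted index, as the executors do with getInvertedIndex(j).
      static BitSet markSortedRange(int start, int end, int[] invertedIndex) {
        BitSet bits = new BitSet(invertedIndex.length);
        for (int j = start; j <= end; j++) {
          bits.set(invertedIndex[j]);   // translate sorted position -> row id
        }
        return bits;
      }

      public static void main(String[] args) {
        // sorted positions 0..3 came from original rows 2, 0, 3, 1
        int[] invertedIndex = {2, 0, 3, 1};
        System.out.println(markSortedRange(1, 2, invertedIndex));  // {0, 3}
      }
    }
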
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeTypeExecuterFactory.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeTypeExecutorFactory.java
similarity index 82%
rename from core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeTypeExecuterFactory.java
rename to core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeTypeExecutorFactory.java
index da9da0a..c872349 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeTypeExecuterFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeTypeExecutorFactory.java
@@ -18,35 +18,35 @@
 package org.apache.carbondata.core.scan.filter.executer;
 
 import org.apache.carbondata.core.datastore.block.SegmentProperties;
-import org.apache.carbondata.core.scan.filter.intf.FilterExecuterType;
+import org.apache.carbondata.core.scan.filter.intf.FilterExecutorType;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 import org.apache.carbondata.core.scan.filter.resolver.RowLevelRangeFilterResolverImpl;
 
-public class RowLevelRangeTypeExecuterFactory {
+public class RowLevelRangeTypeExecutorFactory {
 
-  private RowLevelRangeTypeExecuterFactory() {
+  private RowLevelRangeTypeExecutorFactory() {
 
   }
 
   /**
-   * The method returns the Row Level Range fiter type instance based on
+   * The method returns the Row Level Range filter type instance based on
    * filter tree resolver type.
    *
    * @param filterExpressionResolverTree
    * @param segmentProperties
    * @return the generator instance
    */
-  public static RowLevelFilterExecuterImpl getRowLevelRangeTypeExecuter(
-      FilterExecuterType filterExecuterType, FilterResolverIntf filterExpressionResolverTree,
+  public static RowLevelFilterExecutorImpl getRowLevelRangeTypeExecutor(
+      FilterExecutorType filterExecutorType, FilterResolverIntf filterExpressionResolverTree,
       SegmentProperties segmentProperties) {
-    switch (filterExecuterType) {
+    switch (filterExecutorType) {
 
       case ROWLEVEL_LESSTHAN:
-        return new RowLevelRangeLessThanFilterExecuterImpl(
+        return new RowLevelRangeLessThanFilterExecutorImpl(
             ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
                 .getDimColEvaluatorInfoList(),
             ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
-                .getMsrColEvalutorInfoList(),
+                .getMsrColEvaluatorInfoList(),
             filterExpressionResolverTree.getFilterExpression(),
             ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree).getTableIdentifier(),
             ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
@@ -54,11 +54,11 @@
             ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
             .getMeasureFilterRangeValues(), segmentProperties);
       case ROWLEVEL_LESSTHAN_EQUALTO:
-        return new RowLevelRangeLessThanEqualFilterExecuterImpl(
+        return new RowLevelRangeLessThanEqualFilterExecutorImpl(
             ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
                 .getDimColEvaluatorInfoList(),
             ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
-                .getMsrColEvalutorInfoList(),
+                .getMsrColEvaluatorInfoList(),
             filterExpressionResolverTree.getFilterExpression(),
             ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree).getTableIdentifier(),
             ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
@@ -66,11 +66,11 @@
             ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
                 .getMeasureFilterRangeValues(), segmentProperties);
       case ROWLEVEL_GREATERTHAN_EQUALTO:
-        return new RowLevelRangeGrtrThanEquaToFilterExecuterImpl(
+        return new RowLevelRangeGreaterThanEqualFilterExecutorImpl(
             ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
                 .getDimColEvaluatorInfoList(),
             ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
-                .getMsrColEvalutorInfoList(),
+                .getMsrColEvaluatorInfoList(),
             filterExpressionResolverTree.getFilterExpression(),
             ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree).getTableIdentifier(),
             ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
@@ -78,11 +78,11 @@
             ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
                 .getMeasureFilterRangeValues(), segmentProperties);
       case ROWLEVEL_GREATERTHAN:
-        return new RowLevelRangeGrtThanFiterExecuterImpl(
+        return new RowLevelRangeGreaterThanFilterExecutorImpl(
             ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
                 .getDimColEvaluatorInfoList(),
             ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
-                .getMsrColEvalutorInfoList(),
+                .getMsrColEvaluatorInfoList(),
             filterExpressionResolverTree.getFilterExpression(),
             ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree).getTableIdentifier(),
             ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
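
For reference, getRowLevelRangeTypeExecutor above is a plain switch-on-enum
factory. A minimal, self-contained sketch of the same pattern, assuming
hypothetical names (Type mirrors FilterExecutorType; Executor and the lambdas
are illustrative stand-ins, not CarbonData's API):

    public class ExecutorFactorySketch {
      enum Type {
        ROWLEVEL_LESSTHAN, ROWLEVEL_LESSTHAN_EQUALTO,
        ROWLEVEL_GREATERTHAN, ROWLEVEL_GREATERTHAN_EQUALTO
      }

      interface Executor { String name(); }

      static Executor create(Type type) {
        switch (type) {
          case ROWLEVEL_LESSTHAN:         return () -> "lessThan";
          case ROWLEVEL_LESSTHAN_EQUALTO: return () -> "lessThanEqual";
          case ROWLEVEL_GREATERTHAN:      return () -> "greaterThan";
          default:                        return () -> "greaterThanEqual";
        }
      }

      public static void main(String[] args) {
        // hypothetical usage: resolve one executor per filter node
        System.out.println(create(Type.ROWLEVEL_GREATERTHAN).name());
      }
    }
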
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/TrueFilterExecutor.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/TrueFilterExecutor.java
index cde65bb..7014d29 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/TrueFilterExecutor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/TrueFilterExecutor.java
@@ -23,7 +23,7 @@
 import org.apache.carbondata.core.scan.processor.RawBlockletColumnChunks;
 import org.apache.carbondata.core.util.BitSetGroup;
 
-public class TrueFilterExecutor implements FilterExecuter {
+public class TrueFilterExecutor implements FilterExecutor {
 
   /**
    * API will apply filter based on resolver instance
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/intf/FilterExecuterType.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/intf/FilterExecutorType.java
similarity index 94%
rename from core/src/main/java/org/apache/carbondata/core/scan/filter/intf/FilterExecuterType.java
rename to core/src/main/java/org/apache/carbondata/core/scan/filter/intf/FilterExecutorType.java
index 58d95c1..b101aa2 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/intf/FilterExecuterType.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/intf/FilterExecutorType.java
@@ -19,7 +19,7 @@
 
 import java.io.Serializable;
 
-public enum FilterExecuterType implements Serializable {
+public enum FilterExecutorType implements Serializable {
 
   INCLUDE, EXCLUDE, OR, AND, RESTRUCTURE, ROWLEVEL, RANGE, ROWLEVEL_GREATERTHAN,
   ROWLEVEL_GREATERTHAN_EQUALTO, ROWLEVEL_LESSTHAN_EQUALTO, ROWLEVEL_LESSTHAN, TRUE, FALSE
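
The TRUE and FALSE constants let the scan short-circuit; a hedged sketch of
what a true/false executor pair amounts to (the interface and class names here
are illustrative only, not the module's actual types):

    import java.util.BitSet;

    interface PageFilterSketch {
      BitSet apply(int rowCount);
    }

    class TrueFilterSketch implements PageFilterSketch {
      public BitSet apply(int rowCount) {
        BitSet all = new BitSet(rowCount);
        all.set(0, rowCount);        // every row survives
        return all;
      }
    }

    class FalseFilterSketch implements PageFilterSketch {
      public BitSet apply(int rowCount) {
        return new BitSet(rowCount); // empty bitset: no row survives
      }
    }
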
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/optimizer/RangeFilterOptmizer.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/optimizer/RangeFilterOptimizer.java
similarity index 92%
rename from core/src/main/java/org/apache/carbondata/core/scan/filter/optimizer/RangeFilterOptmizer.java
rename to core/src/main/java/org/apache/carbondata/core/scan/filter/optimizer/RangeFilterOptimizer.java
index 78f04b5..a10297a 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/optimizer/RangeFilterOptmizer.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/optimizer/RangeFilterOptimizer.java
@@ -21,11 +21,11 @@
 import org.apache.carbondata.core.scan.expression.RangeExpressionEvaluator;
 import org.apache.carbondata.core.scan.filter.intf.FilterOptimizer;
 
-public class RangeFilterOptmizer implements FilterOptimizer {
+public class RangeFilterOptimizer implements FilterOptimizer {
 
   RangeExpressionEvaluator rangeExpEvaluator;
 
-  public RangeFilterOptmizer(Expression filterExpression) {
+  public RangeFilterOptimizer(Expression filterExpression) {
     this.rangeExpEvaluator = new RangeExpressionEvaluator(filterExpression);
 
   }
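
RangeFilterOptimizer hands the expression tree to RangeExpressionEvaluator;
the rewrite it enables is, in spirit, folding a greater-than/less-than pair on
the same column into one range node. A toy sketch under that assumption (all
types below are hypothetical, not CarbonData's expression classes):

    public class RangeRewriteSketch {
      static abstract class Expr { }
      static class Gt extends Expr { final String col; final int v;
        Gt(String col, int v) { this.col = col; this.v = v; } }
      static class Lt extends Expr { final String col; final int v;
        Lt(String col, int v) { this.col = col; this.v = v; } }
      static class And extends Expr { final Expr l, r;
        And(Expr l, Expr r) { this.l = l; this.r = r; } }
      static class Range extends Expr { final String col; final int lo, hi;
        Range(String col, int lo, int hi) { this.col = col; this.lo = lo; this.hi = hi; } }

      // Fold "col > a AND col < b" into a single Range(col, a, b) node.
      static Expr optimize(Expr e) {
        if (e instanceof And) {
          And a = (And) e;
          if (a.l instanceof Gt && a.r instanceof Lt
              && ((Gt) a.l).col.equals(((Lt) a.r).col)) {
            return new Range(((Gt) a.l).col, ((Gt) a.l).v, ((Lt) a.r).v);
          }
        }
        return e;
      }
    }
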
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/ConditionalFilterResolverImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/ConditionalFilterResolverImpl.java
index b7ee46f..bc65114 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/ConditionalFilterResolverImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/ConditionalFilterResolverImpl.java
@@ -28,7 +28,7 @@
 import org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedException;
 import org.apache.carbondata.core.scan.expression.logical.RangeExpression;
 import org.apache.carbondata.core.scan.filter.FilterUtil;
-import org.apache.carbondata.core.scan.filter.intf.FilterExecuterType;
+import org.apache.carbondata.core.scan.filter.intf.FilterExecutorType;
 import org.apache.carbondata.core.scan.filter.resolver.metadata.FilterResolverMetadata;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
@@ -59,7 +59,7 @@
   /**
    * This API will resolve the filter expression and generates the
    * dictionaries for executing/evaluating the filter expressions in the
-   * executer layer.
+   * executor layer.
    *
    * @throws FilterUnsupportedException
    */
@@ -89,7 +89,7 @@
           // dimColResolvedFilterInfo
           //visitable object with filter member values based on the visitor type, currently there
           //3 types of visitors custom,direct and no dictionary, all types of visitor populate
-          //the visitable instance as per its buisness logic which is different for all the
+          //the visitable instance as per its business logic which is different for all the
           // visitors.
           if (columnExpression.isMeasure()) {
             msrColResolvedFilterInfo.setMeasure(columnExpression.getMeasure());
@@ -160,7 +160,7 @@
   }
 
   /**
-   * Left node will not be presentin this scenario
+   * Left node will not be present in this scenario
    *
    * @return left node of type FilterResolverIntf instance
    */
@@ -170,7 +170,7 @@
   }
 
   /**
-   * Right node will not be presentin this scenario
+   * Right node will not be present in this scenario
    *
    * @return left node of type FilterResolverIntf instance
    */
@@ -203,21 +203,21 @@
   }
 
   /**
-   * Method will return the executer type for particular conditional resolver
-   * basically two types of executers will be formed for the conditional query.
+   * Method will return the executor type for a particular conditional resolver;
+   * basically two types of executors will be formed for the conditional query.
    *
-   * @return the filter executer type
+   * @return the filter executor type
    */
   @Override
-  public FilterExecuterType getFilterExecuterType() {
+  public FilterExecutorType getFilterExecutorType() {
     switch (exp.getFilterExpressionType()) {
       case NOT_EQUALS:
       case NOT_IN:
-        return FilterExecuterType.EXCLUDE;
+        return FilterExecutorType.EXCLUDE;
       case RANGE:
-        return FilterExecuterType.RANGE;
+        return FilterExecutorType.RANGE;
       default:
-        return FilterExecuterType.INCLUDE;
+        return FilterExecutorType.INCLUDE;
     }
 
   }
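
The NOT_EQUALS/NOT_IN to EXCLUDE mapping works because an exclude result is
simply the complement of the corresponding include result over the page's row
count; a minimal sketch (class and method names illustrative):

    import java.util.BitSet;

    public class IncludeExcludeSketch {
      static BitSet exclude(BitSet includeResult, int rowCount) {
        BitSet result = (BitSet) includeResult.clone();
        result.flip(0, rowCount); // rows matching the filter values are dropped
        return result;
      }
    }
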
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/FilterResolverIntf.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/FilterResolverIntf.java
index 57123b8..af33330 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/FilterResolverIntf.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/FilterResolverIntf.java
@@ -21,7 +21,7 @@
 
 import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedException;
-import org.apache.carbondata.core.scan.filter.intf.FilterExecuterType;
+import org.apache.carbondata.core.scan.filter.intf.FilterExecutorType;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
 
@@ -30,7 +30,7 @@
   /**
    * This API will resolve the filter expression and generates the
    * dictionaries for executing/evaluating the filter expressions in the
-   * executer layer.
+   * executor layer.
    *
    * @throws FilterUnsupportedException
    */
@@ -74,7 +74,7 @@
    *
    * @return FilterExecuterType.
    */
-  FilterExecuterType getFilterExecuterType();
+  FilterExecutorType getFilterExecutorType();
 
   Expression getFilterExpression();
 
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/LogicalFilterResolverImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/LogicalFilterResolverImpl.java
index c8f4106..e838862 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/LogicalFilterResolverImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/LogicalFilterResolverImpl.java
@@ -20,7 +20,7 @@
 import org.apache.carbondata.core.scan.expression.BinaryExpression;
 import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.filter.intf.ExpressionType;
-import org.apache.carbondata.core.scan.filter.intf.FilterExecuterType;
+import org.apache.carbondata.core.scan.filter.intf.FilterExecutorType;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
 
@@ -30,24 +30,24 @@
    */
   private static final long serialVersionUID = 5734382980564402914L;
 
-  protected FilterResolverIntf leftEvalutor;
+  protected FilterResolverIntf leftEvaluator;
 
-  protected FilterResolverIntf rightEvalutor;
+  protected FilterResolverIntf rightEvaluator;
 
   protected ExpressionType filterExpressionType;
 
   private BinaryExpression filterExpression;
 
-  public LogicalFilterResolverImpl(FilterResolverIntf leftEvalutor,
-      FilterResolverIntf rightEvalutor, BinaryExpression currentExpression) {
-    this.leftEvalutor = leftEvalutor;
-    this.rightEvalutor = rightEvalutor;
+  public LogicalFilterResolverImpl(FilterResolverIntf leftEvaluator,
+      FilterResolverIntf rightEvaluator, BinaryExpression currentExpression) {
+    this.leftEvaluator = leftEvaluator;
+    this.rightEvaluator = rightEvaluator;
     this.filterExpressionType = currentExpression.getFilterExpressionType();
     this.filterExpression = currentExpression;
   }
 
   /**
-   * Logical filter resolver will return the left and right filter expresison
+   * Logical filter resolver will return the left and right filter expression
    * node for filter evaluation, so in this instance no implementation is required.
    *
    */
@@ -63,7 +63,7 @@
    * @return FilterResolverIntf.
    */
   public FilterResolverIntf getLeft() {
-    return leftEvalutor;
+    return leftEvaluator;
   }
 
   /**
@@ -73,7 +73,7 @@
    * @return FilterResolverIntf.
    */
   public FilterResolverIntf getRight() {
-    return rightEvalutor;
+    return rightEvaluator;
   }
 
   @Override
@@ -87,12 +87,12 @@
   }
 
   @Override
-  public FilterExecuterType getFilterExecuterType() {
+  public FilterExecutorType getFilterExecutorType() {
     switch (filterExpressionType) {
       case OR:
-        return FilterExecuterType.OR;
+        return FilterExecutorType.OR;
       case AND:
-        return FilterExecuterType.AND;
+        return FilterExecutorType.AND;
 
       default:
         return null;
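
For AND/OR nodes the resolver only records the operator; at execution time the
children's bitsets are combined. A sketch of that combination step (names are
illustrative, not the executor classes themselves):

    import java.util.BitSet;

    public class LogicalCombineSketch {
      static BitSet and(BitSet left, BitSet right) {
        BitSet out = (BitSet) left.clone();
        out.and(right); // a row survives only if both children kept it
        return out;
      }

      static BitSet or(BitSet left, BitSet right) {
        BitSet out = (BitSet) left.clone();
        out.or(right);  // a row survives if either child kept it
        return out;
      }
    }
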
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelFilterResolverImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelFilterResolverImpl.java
index d2ae50f..ded0385 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelFilterResolverImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelFilterResolverImpl.java
@@ -25,7 +25,7 @@
 import org.apache.carbondata.core.scan.expression.ColumnExpression;
 import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.expression.conditional.ConditionalExpression;
-import org.apache.carbondata.core.scan.filter.intf.FilterExecuterType;
+import org.apache.carbondata.core.scan.filter.intf.FilterExecutorType;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
 
@@ -64,7 +64,7 @@
           dimColumnEvaluatorInfo.setColumnIndex(columnExpression.getCarbonColumn().getOrdinal());
           dimColumnEvaluatorInfo.setRowIndex(index++);
           dimColumnEvaluatorInfo.setDimension(columnExpression.getDimension());
-          dimColumnEvaluatorInfo.setDimensionExistsInCurrentSilce(false);
+          dimColumnEvaluatorInfo.setDimensionExistsInCurrentSlice(false);
           dimColEvaluatorInfoList.add(dimColumnEvaluatorInfo);
         } else {
           msrColumnEvalutorInfo = new MeasureColumnResolvedFilterInfo();
@@ -87,13 +87,13 @@
    * and will be send to the spark for processing
    */
   @Override
-  public FilterExecuterType getFilterExecuterType() {
-    return FilterExecuterType.ROWLEVEL;
+  public FilterExecutorType getFilterExecutorType() {
+    return FilterExecutorType.ROWLEVEL;
   }
 
   /**
    * Method will the read filter expression corresponding to the resolver.
-   * This method is required in row level executer inorder to evaluate the filter
+   * This method is required in row level executor in order to evaluate the filter
    * expression against spark, as mentioned above row level is a special type
    * filter resolver.
    *
@@ -115,7 +115,7 @@
   }
 
   /**
-   * Method will return the DimColumnResolvedFilterInfo instance which containts
+   * Method will return the DimColumnResolvedFilterInfo instance which contains
    * measure level details.
    *
    * @return MeasureColumnResolvedFilterInfo
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java
index 96ae473..16bd417 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java
@@ -39,7 +39,7 @@
 import org.apache.carbondata.core.scan.expression.logical.BinaryLogicalExpression;
 import org.apache.carbondata.core.scan.filter.ColumnFilterInfo;
 import org.apache.carbondata.core.scan.filter.FilterUtil;
-import org.apache.carbondata.core.scan.filter.intf.FilterExecuterType;
+import org.apache.carbondata.core.scan.filter.intf.FilterExecutorType;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
 import org.apache.carbondata.core.util.ByteUtil;
@@ -143,18 +143,18 @@
       } catch (FilterIllegalMemberException e) {
         // Any invalid member while evaluation shall be ignored, system will log the
         // error only once since all rows the evaluation happens so inorder to avoid
-        // too much log inforation only once the log will be printed.
+        // too much log information, the log will be printed only once.
         FilterUtil.logError(e, invalidRowsPresent);
       }
     }
-    Comparator<byte[]> filterNoDictValueComaparator = new Comparator<byte[]>() {
+    Comparator<byte[]> filterNoDictValueComparator = new Comparator<byte[]>() {
       @Override
       public int compare(byte[] filterMember1, byte[] filterMember2) {
         return ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterMember1, filterMember2);
       }
 
     };
-    Collections.sort(filterValuesList, filterNoDictValueComaparator);
+    Collections.sort(filterValuesList, filterNoDictValueComparator);
     return filterValuesList;
   }
 
@@ -177,7 +177,7 @@
       } catch (FilterIllegalMemberException e) {
         // Any invalid member while evaluation shall be ignored, system will log the
         // error only once since all rows the evaluation happens so inorder to avoid
-        // too much log inforation only once the log will be printed.
+        // too much log information, the log will be printed only once.
         FilterUtil.logError(e, invalidRowsPresent);
       }
     }
@@ -205,7 +205,7 @@
           dimColumnEvaluatorInfo.setColumnIndex(columnExpression.getCarbonColumn().getOrdinal());
           dimColumnEvaluatorInfo.setRowIndex(index++);
           dimColumnEvaluatorInfo.setDimension(columnExpression.getDimension());
-          dimColumnEvaluatorInfo.setDimensionExistsInCurrentSilce(false);
+          dimColumnEvaluatorInfo.setDimensionExistsInCurrentSlice(false);
           if (columnExpression.getDimension().getDataType() == DataTypes.DATE) {
             if (!isIncludeFilter) {
               filterInfo.setExcludeFilterList(getDirectSurrogateValues(columnExpression));
@@ -228,7 +228,7 @@
           msrColumnEvalutorInfo.setCarbonColumn(columnExpression.getCarbonColumn());
           msrColumnEvalutorInfo.setColumnIndex(columnExpression.getCarbonColumn().getOrdinal());
           msrColumnEvalutorInfo.setType(columnExpression.getCarbonColumn().getDataType());
-          msrColumnEvalutorInfo.setMeasureExistsInCurrentSilce(false);
+          msrColumnEvalutorInfo.setMeasureExistsInCurrentSlice(false);
           filterInfo
               .setMeasuresFilterValuesList(getMeasureRangeValues(columnExpression.getMeasure()));
           filterInfo.setIncludeFilter(isIncludeFilter);
@@ -298,12 +298,12 @@
   }
 
   /**
-   * Method will return the DimColumnResolvedFilterInfo instance which containts
+   * Method will return the DimColumnResolvedFilterInfo instance which contains
    * measure level details.
    *
    * @return MeasureColumnResolvedFilterInfo
    */
-  public List<MeasureColumnResolvedFilterInfo> getMsrColEvalutorInfoList() {
+  public List<MeasureColumnResolvedFilterInfo> getMsrColEvaluatorInfoList() {
     return msrColEvalutorInfoList;
   }
 
@@ -316,24 +316,24 @@
   }
 
   /**
-   * This method will provide the executer type to the callee inorder to identify
+   * This method will provide the executor type to the callee in order to identify
    * the executer type for the filter resolution, Row level filter executer is a
    * special executer since it get all the rows of the specified filter dimension
    * and will be send to the spark for processing
    */
-  public FilterExecuterType getFilterExecuterType() {
+  public FilterExecutorType getFilterExecutorType() {
     switch (exp.getFilterExpressionType()) {
       case GREATERTHAN:
-        return FilterExecuterType.ROWLEVEL_GREATERTHAN;
+        return FilterExecutorType.ROWLEVEL_GREATERTHAN;
       case GREATERTHAN_EQUALTO:
-        return FilterExecuterType.ROWLEVEL_GREATERTHAN_EQUALTO;
+        return FilterExecutorType.ROWLEVEL_GREATERTHAN_EQUALTO;
       case LESSTHAN:
-        return FilterExecuterType.ROWLEVEL_LESSTHAN;
+        return FilterExecutorType.ROWLEVEL_LESSTHAN;
       case LESSTHAN_EQUALTO:
-        return FilterExecuterType.ROWLEVEL_LESSTHAN_EQUALTO;
+        return FilterExecutorType.ROWLEVEL_LESSTHAN_EQUALTO;
 
       default:
-        return FilterExecuterType.ROWLEVEL;
+        return FilterExecutorType.ROWLEVEL;
     }
   }
 }
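
The comparator renamed above sorts no-dictionary filter values byte-wise;
CarbonData uses its internal ByteUtil.UnsafeComparer, but a plain unsigned
byte comparison gives the same ordering. A self-contained sketch:

    import java.util.Comparator;
    import java.util.List;

    public class FilterValueSortSketch {
      static final Comparator<byte[]> UNSIGNED = (a, b) -> {
        int n = Math.min(a.length, b.length);
        for (int i = 0; i < n; i++) {
          int cmp = (a[i] & 0xFF) - (b[i] & 0xFF);
          if (cmp != 0) return cmp;
        }
        return a.length - b.length;
      };

      static void sortFilterValues(List<byte[]> filterValuesList) {
        filterValuesList.sort(UNSIGNED); // sorted values allow, e.g., later binary search
      }
    }
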
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/DimColumnResolvedFilterInfo.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/DimColumnResolvedFilterInfo.java
index fd019f6..2d53e5c 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/DimColumnResolvedFilterInfo.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/DimColumnResolvedFilterInfo.java
@@ -41,12 +41,12 @@
    */
   private int rowIndex = -1;
 
-  private boolean isDimensionExistsInCurrentSilce = true;
+  private boolean isDimensionExistsInCurrentSlice = true;
 
   private CarbonDimension dimension;
 
   /**
-   * reolved filter object of a particlar filter Expression.
+   * resolved filter object of a particular filter Expression.
    */
   private ColumnFilterInfo resolvedFilterValueObj;
 
@@ -58,13 +58,13 @@
 
   public void addDimensionResolvedFilterInstance(CarbonDimension dimension,
       ColumnFilterInfo filterResolvedObj) {
-    List<ColumnFilterInfo> currentVals = dimensionResolvedFilter.get(dimension);
-    if (null == currentVals) {
-      currentVals = new ArrayList<ColumnFilterInfo>(20);
-      currentVals.add(filterResolvedObj);
-      dimensionResolvedFilter.put(dimension, currentVals);
+    List<ColumnFilterInfo> currentValues = dimensionResolvedFilter.get(dimension);
+    if (null == currentValues) {
+      currentValues = new ArrayList<ColumnFilterInfo>(20);
+      currentValues.add(filterResolvedObj);
+      dimensionResolvedFilter.put(dimension, currentValues);
     } else {
-      currentVals.add(filterResolvedObj);
+      currentValues.add(filterResolvedObj);
     }
   }
 
@@ -104,12 +104,12 @@
     this.rowIndex = rowIndex;
   }
 
-  public boolean isDimensionExistsInCurrentSilce() {
-    return isDimensionExistsInCurrentSilce;
+  public boolean isDimensionExistsInCurrentSlice() {
+    return isDimensionExistsInCurrentSlice;
   }
 
-  public void setDimensionExistsInCurrentSilce(boolean isDimensionExistsInCurrentSilce) {
-    this.isDimensionExistsInCurrentSilce = isDimensionExistsInCurrentSilce;
+  public void setDimensionExistsInCurrentSlice(boolean isDimensionExistsInCurrentSlice) {
+    this.isDimensionExistsInCurrentSlice = isDimensionExistsInCurrentSlice;
   }
 
   public void populateFilterInfoBasedOnColumnType(ResolvedFilterInfoVisitorIntf visitor,
@@ -134,7 +134,7 @@
     dimColumnResolvedFilterInfo.resolvedFilterValueObj = this.resolvedFilterValueObj;
     dimColumnResolvedFilterInfo.rowIndex = this.rowIndex;
     dimColumnResolvedFilterInfo.dimensionResolvedFilter = this.dimensionResolvedFilter;
-    dimColumnResolvedFilterInfo.isDimensionExistsInCurrentSilce = isDimensionExistsInCurrentSilce;
+    dimColumnResolvedFilterInfo.isDimensionExistsInCurrentSlice = isDimensionExistsInCurrentSlice;
     dimColumnResolvedFilterInfo.columnIndexInMinMaxByteArray = columnIndexInMinMaxByteArray;
     return dimColumnResolvedFilterInfo;
   }
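
The currentValues null-check in addDimensionResolvedFilterInstance is the
classic map-of-lists idiom; on Java 8+ the same logic can be written with
computeIfAbsent. A sketch with generic types, shown for reference rather than
as a proposed change:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class MapOfListsSketch<K, V> {
      private final Map<K, List<V>> resolvedFilter = new HashMap<>();

      void add(K column, V filterInfo) {
        // create the list on first use, then append in either case
        resolvedFilter.computeIfAbsent(column, k -> new ArrayList<>(20)).add(filterInfo);
      }
    }
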
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/FalseConditionalResolverImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/FalseConditionalResolverImpl.java
index 838e1ab..9d8e147 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/FalseConditionalResolverImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/FalseConditionalResolverImpl.java
@@ -18,7 +18,7 @@
 package org.apache.carbondata.core.scan.filter.resolver.resolverinfo;
 
 import org.apache.carbondata.core.scan.expression.Expression;
-import org.apache.carbondata.core.scan.filter.intf.FilterExecuterType;
+import org.apache.carbondata.core.scan.filter.intf.FilterExecutorType;
 import org.apache.carbondata.core.scan.filter.resolver.ConditionalFilterResolverImpl;
 
 /* The expression with If FALSE will be resolved setting empty bitset. */
@@ -36,18 +36,18 @@
   }
 
   /**
-   * This method will provide the executer type to the callee inorder to identify
-   * the executer type for the filter resolution, False Expresssion willl not execute anything.
+   * This method will provide the executor type to the callee in order to identify
+   * the executor type for the filter resolution, False Expression will not execute anything.
    * it will return empty bitset
    */
   @Override
-  public FilterExecuterType getFilterExecuterType() {
-    return FilterExecuterType.FALSE;
+  public FilterExecutorType getFilterExecutorType() {
+    return FilterExecutorType.FALSE;
   }
 
   /**
    * Method will the read filter expression corresponding to the resolver.
-   * This method is required in row level executer inorder to evaluate the filter
+   * This method is required in row level executor in order to evaluate the filter
    * expression against spark, as mentioned above row level is a special type
    * filter resolver.
    *
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/MeasureColumnResolvedFilterInfo.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/MeasureColumnResolvedFilterInfo.java
index 9becac0..14d8193 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/MeasureColumnResolvedFilterInfo.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/MeasureColumnResolvedFilterInfo.java
@@ -41,14 +41,14 @@
 
   private int rowIndex = -1;
 
-  private boolean isMeasureExistsInCurrentSilce = true;
+  private boolean isMeasureExistsInCurrentSlice = true;
 
   private CarbonColumn carbonColumn;
 
   private CarbonMeasure carbonMeasure;
 
   /**
-   * reolved filter object of a particlar filter Expression.
+   * resolved filter object of a particular filter Expression.
    */
   private ColumnFilterInfo resolvedFilterValueObj;
 
@@ -66,13 +66,13 @@
 
   public void addMeasureResolvedFilterInstance(CarbonMeasure measures,
       ColumnFilterInfo filterResolvedObj) {
-    List<ColumnFilterInfo> currentVals = measureResolvedFilter.get(measures);
-    if (null == currentVals) {
-      currentVals = new ArrayList<ColumnFilterInfo>(20);
-      currentVals.add(filterResolvedObj);
-      measureResolvedFilter.put(measures, currentVals);
+    List<ColumnFilterInfo> currentValues = measureResolvedFilter.get(measures);
+    if (null == currentValues) {
+      currentValues = new ArrayList<ColumnFilterInfo>(20);
+      currentValues.add(filterResolvedObj);
+      measureResolvedFilter.put(measures, currentValues);
     } else {
-      currentVals.add(filterResolvedObj);
+      currentValues.add(filterResolvedObj);
     }
   }
 
@@ -121,8 +121,8 @@
     throw new UnsupportedOperationException("Operation not supported");
   }
 
-  public void setMeasureExistsInCurrentSilce(boolean measureExistsInCurrentSilce) {
-    isMeasureExistsInCurrentSilce = measureExistsInCurrentSilce;
+  public void setMeasureExistsInCurrentSlice(boolean measureExistsInCurrentSlice) {
+    isMeasureExistsInCurrentSlice = measureExistsInCurrentSlice;
   }
 
   public void setMeasure(CarbonMeasure carbonMeasure) {
@@ -151,7 +151,7 @@
     msrColumnResolvedFilterInfo.resolvedFilterValueObj = this.resolvedFilterValueObj;
     msrColumnResolvedFilterInfo.rowIndex = this.rowIndex;
     msrColumnResolvedFilterInfo.measureResolvedFilter = this.measureResolvedFilter;
-    msrColumnResolvedFilterInfo.setMeasureExistsInCurrentSilce(this.isMeasureExistsInCurrentSilce);
+    msrColumnResolvedFilterInfo.setMeasureExistsInCurrentSlice(this.isMeasureExistsInCurrentSlice);
     msrColumnResolvedFilterInfo.columnIndexInMinMaxByteArray = columnIndexInMinMaxByteArray;
     return msrColumnResolvedFilterInfo;
   }
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/TrueConditionalResolverImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/TrueConditionalResolverImpl.java
index d449be2..22add32 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/TrueConditionalResolverImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/TrueConditionalResolverImpl.java
@@ -18,7 +18,7 @@
 package org.apache.carbondata.core.scan.filter.resolver.resolverinfo;
 
 import org.apache.carbondata.core.scan.expression.Expression;
-import org.apache.carbondata.core.scan.filter.intf.FilterExecuterType;
+import org.apache.carbondata.core.scan.filter.intf.FilterExecutorType;
 import org.apache.carbondata.core.scan.filter.resolver.ConditionalFilterResolverImpl;
 
 /* The expression with If TRUE will be resolved setting all bits to TRUE. */
@@ -36,13 +36,13 @@
   }
 
   /**
-   * This method will provide the executer type to the callee inorder to identify
+   * This method will provide the executor type to the callee in order to identify
    * the executer type for the filter resolution, Row level filter executer is a
    * special executer since it get all the rows of the specified filter dimension
    * and will be send to the spark for processing
    */
   @Override
-  public FilterExecuterType getFilterExecuterType() {
-    return FilterExecuterType.TRUE;
+  public FilterExecutorType getFilterExecutorType() {
+    return FilterExecutorType.TRUE;
   }
 }
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/CustomTypeDictionaryVisitor.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/CustomTypeDictionaryVisitor.java
index 6d7002c..e81007d 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/CustomTypeDictionaryVisitor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/CustomTypeDictionaryVisitor.java
@@ -39,7 +39,7 @@
 
   /**
    * This Visitor method is been used to resolve or populate the filter details
-   * by using custom type dictionary value, the filter membrers will be resolved using
+   * by using custom type dictionary value, the filter members will be resolved using
    * custom type function which will generate dictionary for the direct column type filter members
    *
    * @param visitableObj
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/ResolvedFilterInfoVisitorIntf.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/ResolvedFilterInfoVisitorIntf.java
index eb58ece..ae89536 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/ResolvedFilterInfoVisitorIntf.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/ResolvedFilterInfoVisitorIntf.java
@@ -28,7 +28,7 @@
    * dimColResolvedFilterInfo visitable object with filter member values based
    * on the visitor type, currently there 3 types of visitors custom,direct
    * and no dictionary, all types of visitor populate the visitable instance
-   * as per its buisness logic which is different for all the visitors.
+   * as per its business logic which is different for all the visitors.
    *
    * @param visitableObj
    * @param metadata
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModel.java b/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModel.java
index faec145..eec5f8d 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModel.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModel.java
@@ -204,10 +204,10 @@
       col.setDimension(dim);
       col.setDimension(true);
     } else {
-      // in case of sdk or fileformat, there can be chance that each carbondata file may have
+      // in case of sdk or file format, there can be a chance that each carbondata file may have
       // different schema, so every segment properties will have dims and measures based on
       // corresponding segment. So the filter column may not be present in it. so generate the
-      // dimension and measure from the carbontable
+      // dimension and measure from the carbon table
       CarbonDimension dimension =
           table.getDimensionByName(col.getColumnName());
       CarbonMeasure measure = table.getMeasureByName(col.getColumnName());
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModelBuilder.java b/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModelBuilder.java
index 6e3435c..adbba56 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModelBuilder.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModelBuilder.java
@@ -127,19 +127,19 @@
     Map<Integer, List<Integer>> complexColumnMap = new HashMap<>();
     List<ProjectionDimension> carbonDimensions = projection.getDimensions();
     // Traverse and find out if the top most parent of projection column is already there
-    List<CarbonDimension> projectionDimenesionToBeMerged = new ArrayList<>();
+    List<CarbonDimension> projectionDimensionToBeMerged = new ArrayList<>();
     for (ProjectionDimension projectionDimension : carbonDimensions) {
       CarbonDimension complexParentDimension =
           projectionDimension.getDimension().getComplexParentDimension();
       if (null != complexParentDimension && isAlreadyExists(complexParentDimension,
           carbonDimensions)) {
-        projectionDimenesionToBeMerged.add(projectionDimension.getDimension());
+        projectionDimensionToBeMerged.add(projectionDimension.getDimension());
       }
     }
 
-    if (projectionDimenesionToBeMerged.size() != 0) {
+    if (projectionDimensionToBeMerged.size() != 0) {
       projection =
-          removeMergedDimensions(projectionDimenesionToBeMerged, projectionColumns, factTableName);
+          removeMergedDimensions(projectionDimensionToBeMerged, projectionColumns, factTableName);
       carbonDimensions = projection.getDimensions();
     }
 
@@ -220,7 +220,7 @@
       for (int j = i; j < childOrdinals.size(); j++) {
         CarbonDimension parentDimension = getDimensionBasedOnOrdinal(dimList, childOrdinals.get(i));
         CarbonDimension childDimension = getDimensionBasedOnOrdinal(dimList, childOrdinals.get(j));
-        if (!mergedChild.contains(childOrdinals.get(j)) && checkChildsInSamePath(parentDimension,
+        if (!mergedChild.contains(childOrdinals.get(j)) && checkChildrenInSamePath(parentDimension,
             childDimension)) {
           mergedChild.add(childDimension);
         }
@@ -229,7 +229,7 @@
     return mergedChild;
   }
 
-  private boolean checkChildsInSamePath(CarbonDimension parentDimension,
+  private boolean checkChildrenInSamePath(CarbonDimension parentDimension,
       CarbonDimension childDimension) {
     if (parentDimension.getColName().equals(childDimension.getColName())) {
       return false;
@@ -327,7 +327,7 @@
       boolean[] isFilterMeasures = new boolean[table.getAllMeasures().size()];
       queryModel.setIsFilterDimensions(isFilterDimensions);
       queryModel.setIsFilterMeasures(isFilterMeasures);
-      // In case of Dictionary Include Range Column we donot optimize the range expression
+      // In case of Dictionary Include Range Column we do not optimize the range expression
       if (indexFilter != null) {
         if (isConvertToRangeFilter()) {
           indexFilter.processFilterExpression(isFilterDimensions, isFilterMeasures);
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/processor/BlockletIterator.java b/core/src/main/java/org/apache/carbondata/core/scan/processor/BlockletIterator.java
index f0d81ff..f455321 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/processor/BlockletIterator.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/processor/BlockletIterator.java
@@ -27,7 +27,7 @@
   /**
    * data store block
    */
-  protected DataRefNode datablock;
+  protected DataRefNode dataBlock;
   /**
    * block counter to keep a track how many block has been processed
    */
@@ -39,18 +39,18 @@
   private boolean hasNext = true;
 
   /**
-   * total number blocks assgned to this iterator
+   * total number of blocks assigned to this iterator
    */
   private long totalNumberOfBlocksToScan;
 
   /**
    * Constructor
    *
-   * @param datablock                 first data block
+   * @param dataBlock                 first data block
    * @param totalNumberOfBlockletToScan total number of blocklets to be scanned
    */
-  BlockletIterator(DataRefNode datablock, long totalNumberOfBlockletToScan) {
-    this.datablock = datablock;
+  BlockletIterator(DataRefNode dataBlock, long totalNumberOfBlockletToScan) {
+    this.dataBlock = dataBlock;
     this.totalNumberOfBlocksToScan = totalNumberOfBlockletToScan;
   }
 
@@ -71,18 +71,18 @@
   @Override
   public DataRefNode next() {
     // get the current blocks
-    DataRefNode datablockTemp = datablock;
+    DataRefNode dataBlockTemp = dataBlock;
     // store the next data block
-    datablock = datablock.getNextDataRefNode();
+    dataBlock = dataBlock.getNextDataRefNode();
     // increment the counter
     blockCounter++;
     // if all the data block is processed then
     // set the has next flag to false
     // or if number of blocks assigned to this iterator is processed
-    // then also set the hasnext flag to false
-    if (null == datablock || blockCounter >= this.totalNumberOfBlocksToScan) {
+    // then also set the hasNext flag to false
+    if (null == dataBlock || blockCounter >= this.totalNumberOfBlocksToScan) {
       hasNext = false;
     }
-    return datablockTemp;
+    return dataBlockTemp;
   }
 }
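
BlockletIterator walks a singly linked chain of DataRefNode and stops either
at the end of the chain or when its quota of blocks is reached. The shape of
that logic, reduced to a self-contained sketch (Node is hypothetical):

    import java.util.Iterator;
    import java.util.NoSuchElementException;

    public class ChainIteratorSketch implements Iterator<ChainIteratorSketch.Node> {
      public interface Node { Node next(); }

      private Node current;
      private long counter;
      private final long quota;

      ChainIteratorSketch(Node first, long quota) {
        this.current = first;
        this.quota = quota;
      }

      public boolean hasNext() {
        return current != null && counter < quota;
      }

      public Node next() {
        if (!hasNext()) throw new NoSuchElementException();
        Node result = current;    // keep the current node
        current = current.next(); // advance the chain before returning, as above
        counter++;
        return result;
      }
    }
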
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/processor/DataBlockIterator.java b/core/src/main/java/org/apache/carbondata/core/scan/processor/DataBlockIterator.java
index 4420dff..f570c65 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/processor/DataBlockIterator.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/processor/DataBlockIterator.java
@@ -90,7 +90,7 @@
     this.fileReader = fileReader;
     blockletIterator = new BlockletIterator(blockExecutionInfo.getFirstDataBlock(),
         blockExecutionInfo.getNumberOfBlockToScan());
-    if (blockExecutionInfo.getFilterExecuterTree() != null) {
+    if (blockExecutionInfo.getFilterExecutorTree() != null) {
       blockletScanner = new BlockletFilterScanner(blockExecutionInfo, queryStatisticsModel);
     } else {
       blockletScanner = new BlockletFullScanner(blockExecutionInfo, queryStatisticsModel);
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/BlockletScannedResult.java b/core/src/main/java/org/apache/carbondata/core/scan/result/BlockletScannedResult.java
index a3e921c..95e6327 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/BlockletScannedResult.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/BlockletScannedResult.java
@@ -129,7 +129,7 @@
   private int[] complexParentBlockIndexes;
 
   /**
-   * blockletid+pageumber to deleted reocrd map
+   * blockletId+pageNumber to deleted record map
    */
   private Map<String, DeleteDeltaVo> deletedRecordMap;
 
@@ -160,7 +160,7 @@
     this.fixedLengthKeySize = blockExecutionInfo.getFixedLengthKeySize();
     this.noDictionaryColumnChunkIndexes = blockExecutionInfo.getNoDictionaryColumnChunkIndexes();
     this.dictionaryColumnChunkIndexes = blockExecutionInfo.getDictionaryColumnChunkIndex();
-    this.complexParentIndexToQueryMap = blockExecutionInfo.getComlexDimensionInfoMap();
+    this.complexParentIndexToQueryMap = blockExecutionInfo.getComplexDimensionInfoMap();
     this.complexParentBlockIndexes = blockExecutionInfo.getComplexColumnParentBlockIndexes();
     this.totalDimensionsSize = blockExecutionInfo.getProjectionDimensions().length;
     this.deletedRecordMap = blockExecutionInfo.getDeletedRecordsMap();
@@ -263,7 +263,7 @@
 
   public void fillColumnarComplexBatch(ColumnVectorInfo[] vectorInfos) {
     ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
-    ReUsableByteArrayDataOutputStream reuseableDataOutput =
+    ReUsableByteArrayDataOutputStream reusableDataOutput =
         new ReUsableByteArrayDataOutputStream(byteStream);
     boolean isExceptionThrown = false;
     for (int i = 0; i < vectorInfos.length; i++) {
@@ -276,23 +276,23 @@
           vectorInfos[i].genericQueryType
               .parseBlocksAndReturnComplexColumnByteArray(dimRawColumnChunks, dimensionColumnPages,
                   pageFilteredRowId == null ? j : pageFilteredRowId[pageCounter][j], pageCounter,
-                  reuseableDataOutput);
+                  reusableDataOutput);
           Object data = vectorInfos[i].genericQueryType
-              .getDataBasedOnDataType(ByteBuffer.wrap(reuseableDataOutput.getByteArray()));
+              .getDataBasedOnDataType(ByteBuffer.wrap(reusableDataOutput.getByteArray()));
           vector.putObject(vectorOffset++, data);
-          reuseableDataOutput.reset();
+          reusableDataOutput.reset();
         } catch (IOException e) {
           isExceptionThrown = true;
           LOGGER.error(e.getMessage(), e);
         } finally {
           if (isExceptionThrown) {
-            CarbonUtil.closeStreams(reuseableDataOutput);
+            CarbonUtil.closeStreams(reusableDataOutput);
             CarbonUtil.closeStreams(byteStream);
           }
         }
       }
     }
-    CarbonUtil.closeStreams(reuseableDataOutput);
+    CarbonUtil.closeStreams(reusableDataOutput);
     CarbonUtil.closeStreams(byteStream);
   }
 
@@ -323,7 +323,7 @@
   }
 
   /**
-   * Just increment the counter incase of query only on measures.
+   * Just increment the counter in case of query only on measures.
    */
   public void incrementCounter() {
     rowCounter++;
@@ -442,7 +442,7 @@
     clearValidRowIdList();
   }
 
-  public int numberOfpages() {
+  public int numberOfPages() {
     return pageFilteredRowCount.length;
   }
 
@@ -521,7 +521,7 @@
   protected List<byte[][]> getComplexTypeKeyArrayBatch() {
     List<byte[][]> complexTypeArrayList = new ArrayList<>(validRowIds.size());
     ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
-    ReUsableByteArrayDataOutputStream reUseableDataOutput =
+    ReUsableByteArrayDataOutputStream reusableDataOutput =
         new ReUsableByteArrayDataOutputStream(byteStream);
     boolean isExceptionThrown = false;
     byte[][] complexTypeData = null;
@@ -538,23 +538,23 @@
         try {
           genericQueryType
               .parseBlocksAndReturnComplexColumnByteArray(dimRawColumnChunks, dimensionColumnPages,
-                  validRowIds.get(j), pageCounter, reUseableDataOutput);
+                  validRowIds.get(j), pageCounter, reusableDataOutput);
           // get the key array in columnar way
           byte[][] complexKeyArray = complexTypeArrayList.get(j);
           complexKeyArray[i] = byteStream.toByteArray();
-          reUseableDataOutput.reset();
+          reusableDataOutput.reset();
         } catch (IOException e) {
           isExceptionThrown = true;
           LOGGER.error(e.getMessage(), e);
         } finally {
           if (isExceptionThrown) {
-            CarbonUtil.closeStreams(reUseableDataOutput);
+            CarbonUtil.closeStreams(reusableDataOutput);
             CarbonUtil.closeStreams(byteStream);
           }
         }
       }
     }
-    CarbonUtil.closeStreams(reUseableDataOutput);
+    CarbonUtil.closeStreams(reusableDataOutput);
     CarbonUtil.closeStreams(byteStream);
     return complexTypeArrayList;
   }
@@ -573,7 +573,7 @@
   public void setBlockletId(String blockletId, String blockletNumber) {
     this.blockletId = blockletId + CarbonCommonConstants.FILE_SEPARATOR + blockletNumber;
     this.blockletNumber = blockletNumber;
-    // if deleted recors map is present for this block
+    // if deleted record map is present for this block
     // then get the first page deleted vo
     if (null != deletedRecordMap) {
       String key;
@@ -727,7 +727,7 @@
   public abstract int getCurrentRowId();
 
   /**
-   * @return dictionary key array for all the dictionary dimension in integer array forat
+   * @return dictionary key array for all the dictionary dimension in integer array format
    * selected in query
    */
   public abstract int[] getDictionaryKeyIntegerArray();
@@ -750,7 +750,7 @@
       completeKey = new byte[fixedLengthKeySize];
       dictionaryKeyArrayList.add(completeKey);
     }
-    // initialize offset array onli if data is present
+    // initialize offset array only if data is present
     if (this.dictionaryColumnChunkIndexes.length > 0) {
       columnDataOffsets = new int[validRowIds.size()];
     }
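
ReUsableByteArrayDataOutputStream exists so each row's complex value can be
serialized without allocating a fresh buffer; reset() rewinds the backing
array. The reuse pattern in plain java.io (names illustrative):

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class ReusableOutputSketch {
      private final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
      private final DataOutputStream data = new DataOutputStream(bytes);

      byte[] write(int value) throws IOException {
        bytes.reset();          // rewind the buffer, do not reallocate it
        data.writeInt(value);
        return bytes.toByteArray();
      }
    }
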
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/AbstractDetailQueryResultIterator.java b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/AbstractDetailQueryResultIterator.java
index 25ba9a0..c186701 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/AbstractDetailQueryResultIterator.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/AbstractDetailQueryResultIterator.java
@@ -97,7 +97,7 @@
     this.fileReader.setReadPageByPage(queryModel.isReadPageByPage());
     this.execService = execService;
     initialiseInfos();
-    initQueryStatiticsModel();
+    initQueryStatisticsModel();
   }
 
   private void initialiseInfos() {
@@ -125,11 +125,11 @@
    *
    * @param dataBlock       data block
    * @param deleteDeltaInfo delete delta info
-   * @return blockid+pageid to deleted row mapping
+   * @return blockId+pageId to deleted row mapping
    */
   private Map<String, DeleteDeltaVo> getDeleteDeltaDetails(AbstractIndex dataBlock,
       DeleteDeltaInfo deleteDeltaInfo) {
-    // if datablock deleted delta timestamp is more then the current delete delta files timestamp
+    // if data block deleted delta timestamp is more than the current delete delta file's timestamp
     // then return the current deleted rows
     if (dataBlock.getDeleteDeltaTimestamp() >= deleteDeltaInfo
         .getLatestDeleteDeltaFileTimestamp()) {
@@ -232,7 +232,7 @@
     return null;
   }
 
-  private void initQueryStatiticsModel() {
+  private void initQueryStatisticsModel() {
     this.queryStatisticsModel = new QueryStatisticsModel();
     this.queryStatisticsModel.setRecorder(recorder);
     QueryStatistic queryStatisticTotalBlocklet = new QueryStatistic();
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/PartitionSpliterRawResultIterator.java b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/PartitionSplitterRawResultIterator.java
similarity index 91%
rename from core/src/main/java/org/apache/carbondata/core/scan/result/iterator/PartitionSpliterRawResultIterator.java
rename to core/src/main/java/org/apache/carbondata/core/scan/result/iterator/PartitionSplitterRawResultIterator.java
index 790657a..e9b0d11 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/PartitionSpliterRawResultIterator.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/PartitionSplitterRawResultIterator.java
@@ -20,13 +20,13 @@
 import org.apache.carbondata.common.CarbonIterator;
 import org.apache.carbondata.core.scan.result.RowBatch;
 
-public class PartitionSpliterRawResultIterator extends CarbonIterator<Object[]> {
+public class PartitionSplitterRawResultIterator extends CarbonIterator<Object[]> {
 
   private CarbonIterator<RowBatch> iterator;
   private RowBatch batch;
   private int counter;
 
-  public PartitionSpliterRawResultIterator(CarbonIterator<RowBatch> iterator) {
+  public PartitionSplitterRawResultIterator(CarbonIterator<RowBatch> iterator) {
     this.iterator = iterator;
   }
 
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/RawResultIterator.java b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/RawResultIterator.java
index 704df8f..21b4ae9 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/RawResultIterator.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/RawResultIterator.java
@@ -137,7 +137,7 @@
           if (!isBackupFilled) {
             fetchFuture.get();
           }
-          // copy backup buffer to current buffer and fill backup buffer asyn
+          // copy backup buffer to current buffer and fill backup buffer asynchronously
           currentIdxInBuffer = 0;
           currentBuffer.clear();
           currentBuffer = backupBuffer;
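
The swap above is a double-buffering scheme: while the current buffer drains,
the backup buffer is refilled asynchronously, and the reader only blocks when
the refill has not finished. A minimal sketch of the idea, assuming a
hypothetical loader that produces one full buffer per call:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.function.Supplier;

    public class DoubleBufferSketch<T> {
      private final ExecutorService pool = Executors.newSingleThreadExecutor();
      private final Supplier<T> loader;   // produces the next full buffer
      private T current;
      private CompletableFuture<T> backup;

      DoubleBufferSketch(Supplier<T> loader) {
        this.loader = loader;
        this.current = loader.get();                               // first buffer, synchronously
        this.backup = CompletableFuture.supplyAsync(loader, pool); // backup, asynchronously
      }

      T swapBuffers() {
        current = backup.join();                              // block only if refill is unfinished
        backup = CompletableFuture.supplyAsync(loader, pool); // start the next refill
        return current;
      }
    }
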
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/vector/CarbonColumnarBatch.java b/core/src/main/java/org/apache/carbondata/core/scan/result/vector/CarbonColumnarBatch.java
index 471f9b2..634ca7a 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/vector/CarbonColumnarBatch.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/vector/CarbonColumnarBatch.java
@@ -73,7 +73,7 @@
   }
 
   /**
-   * Mark the rows as filterd first before filling the batch, so that these rows will not be added
+   * Mark the rows as filtered first before filling the batch, so that these rows will not be added
    * to vector batches.
    * @param rowId
    */
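
markFiltered flags rows before the batch is filled so they never enter the
vector; a minimal sketch of that mark-then-fill contract (class and method
names here are illustrative):

    import java.util.BitSet;

    public class FilteredBatchSketch {
      private final BitSet filteredRows = new BitSet();

      void markFiltered(int rowId) {
        filteredRows.set(rowId);          // flag first, before filling the batch
      }

      boolean shouldFill(int rowId) {
        return !filteredRows.get(rowId);  // skipped rows are never added to the vector
      }
    }
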
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/AbstractCarbonColumnarVector.java b/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/AbstractCarbonColumnarVector.java
index aeeba53..4a148c1 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/AbstractCarbonColumnarVector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/AbstractCarbonColumnarVector.java
@@ -25,7 +25,7 @@
 import org.apache.carbondata.core.scan.scanner.LazyPageLoader;
 
 public abstract class AbstractCarbonColumnarVector
-    implements CarbonColumnVector, ConvertableVector {
+    implements CarbonColumnVector, ConvertibleVector {
 
   protected CarbonColumnVector columnVector;
 
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/ColumnarVectorWrapperDirectFactory.java b/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/ColumnarVectorWrapperDirectFactory.java
index f6d2941..4c7bb07 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/ColumnarVectorWrapperDirectFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/ColumnarVectorWrapperDirectFactory.java
@@ -34,7 +34,7 @@
    * @param invertedIndex    Inverted index of column page
    * @param nullBitset       row locations of nulls in bitset
    * @param deletedRows      deleted rows locations in bitset.
-   * @param isnullBitsExists whether nullbitset present on this page, usually for dimension columns
+   * @param isnullBitsExists whether nullBitset is present on this page, usually for dimension columns
    *                         there is no null bitset.
    * @return wrapped CarbonColumnVector
    */
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/ColumnarVectorWrapperDirectWithDeleteDelta.java b/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/ColumnarVectorWrapperDirectWithDeleteDelta.java
index e3a488c..e7aaac9 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/ColumnarVectorWrapperDirectWithDeleteDelta.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/ColumnarVectorWrapperDirectWithDeleteDelta.java
@@ -23,7 +23,7 @@
 import org.apache.carbondata.core.scan.result.vector.CarbonColumnVector;
 
 /**
- * Column vector for column pages which has delete delta, so it uses delta biset to filter out
+ * Column vector for column pages which has delete delta, so it uses delta bitset to filter out
  * data before filling to actual vector.
  */
 class ColumnarVectorWrapperDirectWithDeleteDelta extends AbstractCarbonColumnarVector {
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/ColumnarVectorWrapperDirectWithDeleteDeltaAndInvertedIndex.java b/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/ColumnarVectorWrapperDirectWithDeleteDeltaAndInvertedIndex.java
index e322954..6a188b4 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/ColumnarVectorWrapperDirectWithDeleteDeltaAndInvertedIndex.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/ColumnarVectorWrapperDirectWithDeleteDeltaAndInvertedIndex.java
@@ -27,8 +27,8 @@
 import org.apache.carbondata.core.scan.result.vector.impl.CarbonColumnVectorImpl;
 
 /**
- * Column vector for column pages which has delete delta and inverted index, so it uses delta biset
- * to filter out data and use inverted index before filling to actual vector
+ * Column vector for column pages which has delete delta and inverted index, so it uses delta
+ * bitset to filter out data and use inverted index before filling to actual vector
  */
 public class ColumnarVectorWrapperDirectWithDeleteDeltaAndInvertedIndex
     extends ColumnarVectorWrapperDirectWithInvertedIndex {
@@ -48,7 +48,7 @@
    * @param invertedIndex Inverted index of the column
    * @param nullBits Null row ordinals in the bitset
    * @param isnullBitsExists whether to consider inverted index while setting null bitset or not.
-   *                          we are having nullbitset even for dimensions also.
+   *                          nullBitset is present even for dimensions.
    *                          But some dimension columns still don't have nullbitset.
    *                          So if null bitset does not exist then
    *                          it should not inverted index while setting the null
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/ConvertableVector.java b/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/ConvertibleVector.java
similarity index 96%
rename from core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/ConvertableVector.java
rename to core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/ConvertibleVector.java
index 7020c66..6b1102f 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/ConvertableVector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/ConvertibleVector.java
@@ -21,7 +21,7 @@
  * This interface provides method to convert the values by using inverted index and delete delta
  * and fill to the underlying vector.
  */
-public interface ConvertableVector {
+public interface ConvertibleVector {
 
   /**
    * Convert the values and fill it to the underlying vector.
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/SequentialFill.java b/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/SequentialFill.java
index a0df68c..8d8dba6 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/SequentialFill.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/directread/SequentialFill.java
@@ -24,7 +24,7 @@
 
 /**
  * It is sort of a marker interface to let execution engine know that it is appendable/sequential
- * data adding vector. It means we cannot add random rowids to it.
+ * data adding vector. It means we cannot add random row ids to it.
  */
 @InterfaceStability.Evolving
 @InterfaceAudience.Internal
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/BlockletFilterScanner.java b/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/BlockletFilterScanner.java
index 0c3b847..573e8b9 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/BlockletFilterScanner.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/BlockletFilterScanner.java
@@ -29,7 +29,7 @@
 import org.apache.carbondata.core.datastore.page.ColumnPage;
 import org.apache.carbondata.core.scan.executor.infos.BlockExecutionInfo;
 import org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedException;
-import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.core.scan.filter.executer.FilterExecutor;
 import org.apache.carbondata.core.scan.filter.executer.ImplicitColumnFilterExecutor;
 import org.apache.carbondata.core.scan.processor.RawBlockletColumnChunks;
 import org.apache.carbondata.core.scan.result.BlockletScannedResult;
@@ -50,9 +50,9 @@
 public class BlockletFilterScanner extends BlockletFullScanner {
 
   /**
-   * filter executer to evaluate filter condition
+   * filter executor to evaluate filter condition
    */
-  private FilterExecuter filterExecuter;
+  private FilterExecutor filterExecutor;
   /**
    * this will be used to apply min max
    * this will be useful for dimension column which is on the right side
@@ -79,7 +79,7 @@
       isMinMaxEnabled = Boolean.parseBoolean(minMaxEnableValue);
     }
     // get the filter tree
-    this.filterExecuter = blockExecutionInfo.getFilterExecuterTree();
+    this.filterExecutor = blockExecutionInfo.getFilterExecutorTree();
     this.queryStatisticsModel = queryStatisticsModel;
 
     String useBitSetPipeLine = CarbonProperties.getInstance()
@@ -125,15 +125,15 @@
       }
       BitSet bitSet = null;
       // check for implicit include filter instance
-      if (filterExecuter instanceof ImplicitColumnFilterExecutor) {
+      if (filterExecutor instanceof ImplicitColumnFilterExecutor) {
         String blockletId = blockExecutionInfo.getBlockIdString() +
             CarbonCommonConstants.FILE_SEPARATOR + dataBlock.blockletIndex();
-        bitSet = ((ImplicitColumnFilterExecutor) filterExecuter)
+        bitSet = ((ImplicitColumnFilterExecutor) filterExecutor)
             .isFilterValuesPresentInBlockOrBlocklet(
                 dataBlock.getColumnsMaxValue(),
                 dataBlock.getColumnsMinValue(), blockletId, dataBlock.minMaxFlagArray());
       } else {
-        bitSet = this.filterExecuter
+        bitSet = this.filterExecutor
             .isScanRequired(dataBlock.getColumnsMaxValue(),
                 dataBlock.getColumnsMinValue(), dataBlock.minMaxFlagArray());
       }
@@ -145,7 +145,7 @@
   @Override
   public void readBlocklet(RawBlockletColumnChunks rawBlockletColumnChunks) throws IOException {
     long startTime = System.currentTimeMillis();
-    this.filterExecuter.readColumnChunks(rawBlockletColumnChunks);
+    this.filterExecutor.readColumnChunks(rawBlockletColumnChunks);
     // adding statistics for carbon read time
     QueryStatistic readTime = queryStatisticsModel.getStatisticsTypeAndObjMap()
         .get(QueryStatisticsConstants.READ_BLOCKlET_TIME);
@@ -172,11 +172,11 @@
   private BlockletScannedResult executeFilter(RawBlockletColumnChunks rawBlockletColumnChunks)
       throws FilterUnsupportedException, IOException {
     long startTime = System.currentTimeMillis();
-    // set the indexed data if it has any during fgindex pruning.
+    // set the indexed data if any exists during fgIndex pruning.
     BitSetGroup fgBitSetGroup = rawBlockletColumnChunks.getDataBlock().getIndexedData();
     rawBlockletColumnChunks.setBitSetGroup(fgBitSetGroup);
     // apply filter on actual data, for each page
-    BitSetGroup bitSetGroup = this.filterExecuter.applyFilter(rawBlockletColumnChunks,
+    BitSetGroup bitSetGroup = this.filterExecutor.applyFilter(rawBlockletColumnChunks,
         useBitSetPipeLine);
     // if filter result is empty then return with empty result
     if (bitSetGroup.isEmpty()) {
@@ -362,7 +362,7 @@
       throws FilterUnsupportedException, IOException {
     long startTime = System.currentTimeMillis();
     // apply filter on actual data, for each page
-    BitSet pages = this.filterExecuter.prunePages(rawBlockletColumnChunks);
+    BitSet pages = this.filterExecutor.prunePages(rawBlockletColumnChunks);
     // if filter result is empty then return with empty result
     if (pages.isEmpty()) {
       CarbonUtil.freeMemory(rawBlockletColumnChunks.getDimensionRawColumnChunks(),
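
For context, the isScanRequired calls in this class implement min/max pruning: a blocklet is skipped when the filter value cannot fall inside the column's stored [min, max] range. A reduced sketch, assuming a single int column (the real code compares unsigned byte arrays):

    final class MinMaxPruneSketch {
      // Returns false when the blocklet's [min, max] range for the column
      // cannot contain the filter value, so the blocklet can be skipped.
      static boolean isScanRequired(int blockletMin, int blockletMax, int filterValue) {
        return filterValue >= blockletMin && filterValue <= blockletMax;
      }
      // isScanRequired(10, 50, 99) -> false: skip the blocklet
      // isScanRequired(10, 50, 42) -> true: the blocklet must be scanned
    }
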
diff --git a/core/src/main/java/org/apache/carbondata/core/stats/QueryStatistic.java b/core/src/main/java/org/apache/carbondata/core/stats/QueryStatistic.java
index b21bb94..d1fe3c3 100644
--- a/core/src/main/java/org/apache/carbondata/core/stats/QueryStatistic.java
+++ b/core/src/main/java/org/apache/carbondata/core/stats/QueryStatistic.java
@@ -42,7 +42,7 @@
   private long timeTaken;
 
   /**
-   * starttime of the phase
+   * start time of the phase
    */
   private long startTime;
 
@@ -71,10 +71,10 @@
    * For example total time taken for scan or result preparation
    *
    * @param message   statistic message
-   * @param timetaken
+   * @param timeTaken
    */
-  public void addFixedTimeStatistic(String message, long timetaken) {
-    this.timeTaken = timetaken;
+  public void addFixedTimeStatistic(String message, long timeTaken) {
+    this.timeTaken = timeTaken;
     this.message = message;
   }
 
@@ -95,7 +95,7 @@
     if (StringUtils.isEmpty(queryWithTaskId)) {
       return message + timeTaken;
     }
-    return message + " for the taskid : " + queryWithTaskId + " Is : " + timeTaken;
+    return message + " for the task id: " + queryWithTaskId + " is: " + timeTaken;
   }
 
   public String getMessage() {
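
For reference, the renamed parameter would be used like this (the no-arg constructor is an assumption; only the addFixedTimeStatistic signature appears in this diff):

    QueryStatistic scanStatistic = new QueryStatistic();
    // records the message together with a fixed time taken, in milliseconds
    scanStatistic.addFixedTimeStatistic("Time taken for scan ", 1234L);
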
diff --git a/core/src/main/java/org/apache/carbondata/core/statusmanager/LoadMetadataDetails.java b/core/src/main/java/org/apache/carbondata/core/statusmanager/LoadMetadataDetails.java
index 7fb2cbd..b97109b 100644
--- a/core/src/main/java/org/apache/carbondata/core/statusmanager/LoadMetadataDetails.java
+++ b/core/src/main/java/org/apache/carbondata/core/statusmanager/LoadMetadataDetails.java
@@ -86,7 +86,7 @@
   /**
    * Segment modification or deletion time stamp
    */
-  private String modificationOrdeletionTimesStamp;
+  private String modificationOrDeletionTimestamp;
 
   private String loadStartTime;
 
@@ -167,21 +167,21 @@
   }
 
   /**
-   * @return the modificationOrdeletionTimesStamp
+   * @return the modificationOrDeletionTimestamp
    */
-  public long getModificationOrdeletionTimesStamp() {
-    if (null == modificationOrdeletionTimesStamp) {
+  public long getModificationOrDeletionTimestamp() {
+    if (null == modificationOrDeletionTimestamp) {
       return 0;
     }
-    return convertTimeStampToLong(modificationOrdeletionTimesStamp);
+    return convertTimeStampToLong(modificationOrDeletionTimestamp);
   }
 
   /**
-   * @param modificationOrdeletionTimesStamp the modificationOrdeletionTimesStamp to set
+   * @param modificationOrDeletionTimestamp the modificationOrDeletionTimestamp to set
    */
-  public void setModificationOrdeletionTimesStamp(long modificationOrdeletionTimesStamp) {
-    this.modificationOrdeletionTimesStamp =
-        Long.toString(modificationOrdeletionTimesStamp);
+  public void setModificationOrDeletionTimestamp(long modificationOrDeletionTimestamp) {
+    this.modificationOrDeletionTimestamp =
+        Long.toString(modificationOrDeletionTimestamp);
   }
 
   /* (non-Javadoc)
diff --git a/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentStatusManager.java b/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentStatusManager.java
index 16b2a4e..2d2060e 100755
--- a/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentStatusManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentStatusManager.java
@@ -96,7 +96,7 @@
   }
 
   /**
-   * This will return the lock object used to lock the table status file before updation.
+   * This will return the lock object used to lock the table status file before updating.
    *
    * @return
    */
@@ -347,7 +347,7 @@
           throw ex;
         }
         try {
-          LOG.warn("Failed to read table status file, retrycount:" + retry);
+          LOG.warn("Failed to read table status file, retry count:" + retry);
           // sleep for some time before retry
           TimeUnit.SECONDS.sleep(READ_TABLE_STATUS_RETRY_TIMEOUT);
         } catch (InterruptedException e) {
@@ -696,7 +696,7 @@
           } else if (SegmentStatus.MARKED_FOR_DELETE != segmentStatus) {
             loadFound = true;
             loadMetadata.setSegmentStatus(SegmentStatus.MARKED_FOR_DELETE);
-            loadMetadata.setModificationOrdeletionTimesStamp(CarbonUpdateUtil.readCurrentTime());
+            loadMetadata.setModificationOrDeletionTimestamp(CarbonUpdateUtil.readCurrentTime());
             LOG.info("Segment ID " + loadId + " Marked for Delete");
           }
           break;
@@ -805,7 +805,7 @@
   }
 
   /**
-   * updates segment status and modificaton time details
+   * updates segment status and modification time details
    *
    * @param loadMetadata
    */
@@ -813,7 +813,7 @@
     // update status only if the segment is not marked for delete
     if (SegmentStatus.MARKED_FOR_DELETE != loadMetadata.getSegmentStatus()) {
       loadMetadata.setSegmentStatus(SegmentStatus.MARKED_FOR_DELETE);
-      loadMetadata.setModificationOrdeletionTimesStamp(CarbonUpdateUtil.readCurrentTime());
+      loadMetadata.setModificationOrDeletionTimestamp(CarbonUpdateUtil.readCurrentTime());
     }
   }
 
@@ -883,13 +883,13 @@
     LoadMetadataDetails[] listOfLoadFolderDetailsArray =
               SegmentStatusManager.readLoadMetadata(metaPath);
     if (listOfLoadFolderDetailsArray.length != 0) {
-      for (LoadMetadataDetails loaddetail :listOfLoadFolderDetailsArray) {
-        SegmentStatus segmentStatus = loaddetail.getSegmentStatus();
+      for (LoadMetadataDetails loadDetail :listOfLoadFolderDetailsArray) {
+        SegmentStatus segmentStatus = loadDetail.getSegmentStatus();
         if (segmentStatus == SegmentStatus.INSERT_IN_PROGRESS
             || segmentStatus == SegmentStatus.INSERT_OVERWRITE_IN_PROGRESS) {
           loadInProgress =
               isLoadInProgress(carbonTable.getAbsoluteTableIdentifier(),
-                  loaddetail.getLoadName());
+                  loadDetail.getLoadName());
         }
       }
     }
@@ -928,12 +928,12 @@
     LoadMetadataDetails[] listOfLoadFolderDetailsArray =
         SegmentStatusManager.readLoadMetadata(metaPath);
     if (listOfLoadFolderDetailsArray.length != 0) {
-      for (LoadMetadataDetails loaddetail :listOfLoadFolderDetailsArray) {
-        SegmentStatus segmentStatus = loaddetail.getSegmentStatus();
+      for (LoadMetadataDetails loadDetail :listOfLoadFolderDetailsArray) {
+        SegmentStatus segmentStatus = loadDetail.getSegmentStatus();
         if (segmentStatus == SegmentStatus.INSERT_OVERWRITE_IN_PROGRESS) {
           loadInProgress =
               isLoadInProgress(carbonTable.getAbsoluteTableIdentifier(),
-                  loaddetail.getLoadName());
+                  loadDetail.getLoadName());
         }
       }
     }
@@ -1037,13 +1037,13 @@
     }
   }
 
-  private static ReturnTuple isUpdationRequired(boolean isForceDeletion, CarbonTable carbonTable,
+  private static ReturnTuple isUpdateRequired(boolean isForceDeletion, CarbonTable carbonTable,
       AbsoluteTableIdentifier absoluteTableIdentifier, LoadMetadataDetails[] details) {
     // Delete marked loads
-    boolean isUpdationRequired = DeleteLoadFolders
+    boolean isUpdateRequired = DeleteLoadFolders
         .deleteLoadFoldersFromFileSystem(absoluteTableIdentifier, isForceDeletion, details,
             carbonTable.getMetadataPath());
-    return new ReturnTuple(details, isUpdationRequired);
+    return new ReturnTuple(details, isUpdateRequired);
   }
 
   public static void deleteLoadsAndUpdateMetadata(CarbonTable carbonTable, boolean isForceDeletion,
@@ -1054,24 +1054,24 @@
     CarbonLockUtil.deleteExpiredSegmentLockFiles(carbonTable);
     if (isLoadDeletionRequired(metadataDetails)) {
       AbsoluteTableIdentifier identifier = carbonTable.getAbsoluteTableIdentifier();
-      boolean updationCompletionStatus = false;
+      boolean updateCompletionStatus = false;
       LoadMetadataDetails[] newAddedLoadHistoryList = null;
       ReturnTuple tuple =
-          isUpdationRequired(isForceDeletion, carbonTable, identifier, metadataDetails);
+          isUpdateRequired(isForceDeletion, carbonTable, identifier, metadataDetails);
       if (tuple.isUpdateRequired) {
         ICarbonLock carbonTableStatusLock =
             CarbonLockFactory.getCarbonLockObj(identifier, LockUsage.TABLE_STATUS_LOCK);
         boolean locked = false;
         try {
-          // Update load metadate file after cleaning deleted nodes
+          // Update load metadata file after cleaning deleted nodes
           locked = carbonTableStatusLock.lockWithRetries();
           if (locked) {
             LOG.info("Table status lock has been successfully acquired.");
-            // Again read status and check to verify updation required or not.
+            // Read the status again and check whether an update is still required.
             LoadMetadataDetails[] details =
                 SegmentStatusManager.readLoadMetadata(carbonTable.getMetadataPath());
             ReturnTuple tuple2 =
-                isUpdationRequired(isForceDeletion, carbonTable, identifier, details);
+                isUpdateRequired(isForceDeletion, carbonTable, identifier, details);
             if (!tuple2.isUpdateRequired) {
               return;
             }
@@ -1110,7 +1110,7 @@
                   CarbonTablePath.getTableStatusFilePath(identifier.getTablePath()),
                   latestStatus.toArray(new LoadMetadataDetails[0]));
             }
-            updationCompletionStatus = true;
+            updateCompletionStatus = true;
           } else {
             String dbName = identifier.getCarbonTableIdentifier().getDatabaseName();
             String tableName = identifier.getCarbonTableIdentifier().getTableName();
@@ -1125,7 +1125,7 @@
           if (locked) {
             CarbonLockUtil.fileUnlock(carbonTableStatusLock, LockUsage.TABLE_STATUS_LOCK);
           }
-          if (updationCompletionStatus) {
+          if (updateCompletionStatus) {
             DeleteLoadFolders
                 .physicalFactAndMeasureMetadataDeletion(carbonTable, newAddedLoadHistoryList,
                     isForceDeletion, partitionSpecs);
@@ -1141,7 +1141,7 @@
         carbonTable.getAbsoluteTableIdentifier(), LockUsage.TABLE_STATUS_LOCK);
     boolean locked = false;
     try {
-      // Update load metadate file after cleaning deleted nodes
+      // Update load metadata file after cleaning deleted nodes
       locked = carbonTableStatusLock.lockWithRetries();
       if (locked) {
         LOG.info("Table status lock has been successfully acquired.");
diff --git a/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java b/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java
index d547c3d..f9a3ee7 100644
--- a/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java
@@ -120,7 +120,7 @@
    */
   private void updateUpdateDetails(String updateVersion) {
     if (updateVersion != null) {
-      List<SegmentUpdateDetails> newupdateDetails = new ArrayList<>();
+      List<SegmentUpdateDetails> newUpdateDetails = new ArrayList<>();
       for (SegmentUpdateDetails updateDetail : updateDetails) {
         if (updateDetail.getDeltaFileStamps() != null) {
           if (updateDetail.getDeltaFileStamps().contains(updateVersion)) {
@@ -128,14 +128,14 @@
             set.add(updateVersion);
             updateDetail.setDeltaFileStamps(set);
             updateDetail.setSegmentStatus(SegmentStatus.SUCCESS);
-            newupdateDetails.add(updateDetail);
+            newUpdateDetails.add(updateDetail);
           }
         } else if (updateDetail.getDeleteDeltaStartTimestamp().equalsIgnoreCase(updateVersion)) {
           updateDetail.setSegmentStatus(SegmentStatus.SUCCESS);
-          newupdateDetails.add(updateDetail);
+          newUpdateDetails.add(updateDetail);
         }
       }
-      updateDetails = newupdateDetails.toArray(new SegmentUpdateDetails[0]);
+      updateDetails = newUpdateDetails.toArray(new SegmentUpdateDetails[0]);
     }
   }
 
@@ -169,7 +169,7 @@
 
   /**
    *
-   * @param key will be like (segid/blockname)  0/0-0-5464654654654
+   * @param key will be like (segmentId/blockName) 0/0-0-5464654654654
    * @return
    */
   public SegmentUpdateDetails getDetailsForABlock(String key) {
@@ -203,7 +203,7 @@
   }
 
   /**
-   * This will return the lock object used to lock the table update status file before updation.
+   * This will return the lock object used to lock the table update status file before updating.
    *
    * @return
    */
@@ -269,8 +269,8 @@
       }
     });
 
-    for (CarbonFile cfile : files) {
-      updatedDeltaFilesList.add(cfile.getCanonicalPath());
+    for (CarbonFile file : files) {
+      updatedDeltaFilesList.add(file.getCanonicalPath());
     }
 
     return updatedDeltaFilesList;
@@ -293,11 +293,11 @@
   private List<String> getDeltaFiles(String blockPath, String segment, String extension) {
     Path path = new Path(blockPath);
     String completeBlockName = path.getName();
-    String blockNameWithoutExtn =
+    String blockNameWithoutExtension =
         completeBlockName.substring(0, completeBlockName.lastIndexOf('.'));
     //blockName without timestamp
     final String blockNameFromTuple =
-        blockNameWithoutExtn.substring(0, blockNameWithoutExtn.lastIndexOf("-"));
+        blockNameWithoutExtension.substring(0, blockNameWithoutExtension.lastIndexOf("-"));
     return getDeltaFiles(path.getParent().toString(), blockNameFromTuple, extension, segment);
   }
 
@@ -362,7 +362,7 @@
               new StringBuilder(blockDir).append(CarbonCommonConstants.FILE_SEPARATOR)
                   .append(block.getBlockName()).append("-")
                   .append(block.getDeleteDeltaStartTimestamp()).append(extension).toString());
-          // If deltatimestamps list has data then it has multiple delta file so construct the file
+          // If the delta timestamp list has data then there are multiple delta files, so construct the file
           // directly with list of deltas with out listing
         } else if (block.getDeltaFileStamps() != null && block.getDeltaFileStamps().size() > 0) {
           for (String delta : block.getDeltaFileStamps()) {
@@ -372,7 +372,7 @@
                     .toString());
           }
         } else {
-          // It is for backward compatability.It lists the files.
+          // It is for backward compatibility. It lists the files.
           return getFilePaths(blockDir, blockNameFromTuple, extension, deleteFileList,
               deltaStartTimestamp, deltaEndTimeStamp);
         }
@@ -397,8 +397,8 @@
         }
       });
       deltaList = new ArrayList<>(files.length);
-      for (CarbonFile cfile : files) {
-        deltaList.add(cfile.getCanonicalPath());
+      for (CarbonFile file : files) {
+        deltaList.add(file.getCanonicalPath());
       }
       segmentDeleteDeltaListMap.put(blockDir, deltaList);
     }
@@ -469,7 +469,7 @@
   /**
    * Returns all update delta files of specified Segment.
    *
-   * @param loadMetadataDetail metadatadetails of segment
+   * @param loadMetadataDetail metadata details of segment
    * @param validUpdateFiles if true then only the valid range files will be returned.
    * @return
    */
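
The renamed blockNameWithoutExtension feeds a two-step parse: strip the extension, then strip the trailing timestamp to get the bare block name. A runnable sketch (the sample file name is invented; the substring steps mirror the code above):

    public class BlockNameSketch {
      public static void main(String[] args) {
        String completeBlockName = "part-0-0-5464654654654.carbondata"; // invented sample
        // strip the extension
        String withoutExtension =
            completeBlockName.substring(0, completeBlockName.lastIndexOf('.'));
        // strip the trailing "-<timestamp>" part
        String blockNameFromTuple =
            withoutExtension.substring(0, withoutExtension.lastIndexOf('-'));
        System.out.println(blockNameFromTuple); // prints: part-0-0
      }
    }
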
diff --git a/core/src/main/java/org/apache/carbondata/core/statusmanager/StageInputCollector.java b/core/src/main/java/org/apache/carbondata/core/statusmanager/StageInputCollector.java
index f8e590c..0a3c35d 100644
--- a/core/src/main/java/org/apache/carbondata/core/statusmanager/StageInputCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/statusmanager/StageInputCollector.java
@@ -38,7 +38,7 @@
 import org.apache.carbondata.core.datastore.impl.FileFactory;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 
-import static org.apache.carbondata.core.util.path.CarbonTablePath.SUCCESS_FILE_SUBFIX;
+import static org.apache.carbondata.core.util.path.CarbonTablePath.SUCCESS_FILE_SUFFIX;
 
 import com.google.gson.Gson;
 import org.apache.commons.io.IOUtils;
@@ -89,10 +89,10 @@
       CarbonFile[] allFiles = dir.listFiles();
       Map<String, CarbonFile> map = new HashMap<>();
       Arrays.stream(allFiles)
-          .filter(file -> file.getName().endsWith(SUCCESS_FILE_SUBFIX))
+          .filter(file -> file.getName().endsWith(SUCCESS_FILE_SUFFIX))
           .forEach(file -> map.put(file.getName().substring(0, file.getName().indexOf(".")), file));
       Arrays.stream(allFiles)
-          .filter(file -> !file.getName().endsWith(SUCCESS_FILE_SUBFIX))
+          .filter(file -> !file.getName().endsWith(SUCCESS_FILE_SUFFIX))
           .filter(file -> map.containsKey(file.getName()))
           .forEach(carbonFile -> {
             stageInputList.add(carbonFile);
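
The two stream passes here pair each stage data file with its success marker: the first pass indexes markers by base name, the second keeps only files with a matching marker. A self-contained sketch (file names invented; SUCCESS_FILE_SUFFIX is assumed to be ".success"):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class StagePairingSketch {
      public static void main(String[] args) {
        List<String> names = Arrays.asList("stage1", "stage1.success", "stage2");
        Map<String, String> markers = new HashMap<>();
        // first pass: index success markers by the text before the first dot
        for (String n : names) {
          if (n.endsWith(".success")) {
            markers.put(n.substring(0, n.indexOf('.')), n);
          }
        }
        // second pass: keep only data files that have a matching marker
        for (String n : names) {
          if (!n.endsWith(".success") && markers.containsKey(n)) {
            System.out.println("valid stage input: " + n); // prints stage1 only
          }
        }
      }
    }
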
diff --git a/core/src/main/java/org/apache/carbondata/core/stream/StreamPruner.java b/core/src/main/java/org/apache/carbondata/core/stream/StreamPruner.java
index eee2f10..f8ee3b8 100644
--- a/core/src/main/java/org/apache/carbondata/core/stream/StreamPruner.java
+++ b/core/src/main/java/org/apache/carbondata/core/stream/StreamPruner.java
@@ -33,7 +33,7 @@
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
 import org.apache.carbondata.core.reader.CarbonIndexFileReader;
 import org.apache.carbondata.core.scan.filter.FilterUtil;
-import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.core.scan.filter.executer.FilterExecutor;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 import org.apache.carbondata.core.util.CarbonMetadataUtil;
 import org.apache.carbondata.core.util.path.CarbonTablePath;
@@ -43,7 +43,7 @@
 public class StreamPruner {
 
   private CarbonTable carbonTable;
-  private FilterExecuter filterExecuter;
+  private FilterExecutor filterExecutor;
 
   private int totalFileNums = 0;
 
@@ -66,13 +66,13 @@
           carbonTable.getTableInfo().getFactTable().getListOfColumns();
       // initial filter executor
       SegmentProperties segmentProperties = new SegmentProperties(listOfColumns);
-      filterExecuter = FilterUtil.getFilterExecuterTree(
+      filterExecutor = FilterUtil.getFilterExecutorTree(
           filterExp, segmentProperties, null, minMaxCacheColumns, false);
     }
   }
 
   public List<StreamFile> prune(List<Segment> segments) throws IOException {
-    if (filterExecuter == null) {
+    if (filterExecutor == null) {
       // if filter is null, list all steam files
       return listAllStreamFiles(segments, false);
     } else {
@@ -95,7 +95,7 @@
     }
     byte[][] maxValue = streamFile.getMinMaxIndex().getMaxValues();
     byte[][] minValue = streamFile.getMinMaxIndex().getMinValues();
-    BitSet bitSet = filterExecuter
+    BitSet bitSet = filterExecutor
         .isScanRequired(maxValue, minValue, streamFile.getMinMaxIndex().getIsMinMaxSet());
     if (!bitSet.isEmpty()) {
       return true;
diff --git a/core/src/main/java/org/apache/carbondata/core/util/AbstractDataFileFooterConverter.java b/core/src/main/java/org/apache/carbondata/core/util/AbstractDataFileFooterConverter.java
index 5e505b3..5c0422c 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/AbstractDataFileFooterConverter.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/AbstractDataFileFooterConverter.java
@@ -320,10 +320,10 @@
   }
 
   private List<ParentColumnTableRelation> fromThriftToWrapperParentTableColumnRelations(
-      List<org.apache.carbondata.format.ParentColumnTableRelation> thirftParentColumnRelation) {
+      List<org.apache.carbondata.format.ParentColumnTableRelation> thriftParentColumnRelation) {
     List<ParentColumnTableRelation> parentColumnTableRelationList = new ArrayList<>();
     for (org.apache.carbondata.format.ParentColumnTableRelation carbonTableRelation :
-        thirftParentColumnRelation) {
+        thriftParentColumnRelation) {
       RelationIdentifier relationIdentifier =
           new RelationIdentifier(carbonTableRelation.getRelationIdentifier().getDatabaseName(),
               carbonTableRelation.getRelationIdentifier().getTableName(),
diff --git a/core/src/main/java/org/apache/carbondata/core/util/BlockletIndexUtil.java b/core/src/main/java/org/apache/carbondata/core/util/BlockletIndexUtil.java
index a4d3c12..32dfb4c 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/BlockletIndexUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/BlockletIndexUtil.java
@@ -146,7 +146,7 @@
 
   /**
    * This method will create file name to block Meta Info Mapping. This method will reduce the
-   * number of namenode calls and using this method one namenode will fetch 1000 entries
+   * number of NameNode calls and using this method one NameNode call will fetch 1000 entries
    *
    * @param segmentFilePath
    * @return
@@ -431,7 +431,7 @@
       } else {
         // check if all the filter dimensions are cached
         for (CarbonDimension filterDimension : filterDimensions) {
-          // complex dimensions are not allwed to be specified in COLUMN_META_CACHE property, so
+          // complex dimensions are not allowed to be specified in COLUMN_META_CACHE property, so
           // cannot validate for complex columns
           if (filterDimension.isComplex()) {
             continue;
diff --git a/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java b/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java
index 1cfeaaa..cbfe82e 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java
@@ -87,19 +87,19 @@
 
   /**
    * convert number in byte to more readable format
-   * @param sizeInbyte
+   * @param sizeInByte
    * @return
    */
-  public static String convertByteToReadable(long sizeInbyte) {
+  public static String convertByteToReadable(long sizeInByte) {
 
     String readableSize;
-    if (sizeInbyte < CarbonCommonConstants.BYTE_TO_KB_CONVERSION_FACTOR) {
-      readableSize = sizeInbyte + " Byte";
-    } else if (sizeInbyte < CarbonCommonConstants.BYTE_TO_KB_CONVERSION_FACTOR *
+    if (sizeInByte < CarbonCommonConstants.BYTE_TO_KB_CONVERSION_FACTOR) {
+      readableSize = sizeInByte + " Byte";
+    } else if (sizeInByte < CarbonCommonConstants.BYTE_TO_KB_CONVERSION_FACTOR *
             CarbonCommonConstants.BYTE_TO_KB_CONVERSION_FACTOR) {
-      readableSize = sizeInbyte / CarbonCommonConstants.BYTE_TO_KB_CONVERSION_FACTOR + " KB";
+      readableSize = sizeInByte / CarbonCommonConstants.BYTE_TO_KB_CONVERSION_FACTOR + " KB";
     } else {
-      readableSize = sizeInbyte / CarbonCommonConstants.BYTE_TO_KB_CONVERSION_FACTOR /
+      readableSize = sizeInByte / CarbonCommonConstants.BYTE_TO_KB_CONVERSION_FACTOR /
               CarbonCommonConstants.BYTE_TO_KB_CONVERSION_FACTOR + " MB";
     }
     return readableSize;
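
Assuming BYTE_TO_KB_CONVERSION_FACTOR is 1024, the renamed method behaves like this:

    // ByteUtil.convertByteToReadable(512L)              -> "512 Byte"
    // ByteUtil.convertByteToReadable(2048L)             -> "2 KB"
    // ByteUtil.convertByteToReadable(3L * 1024 * 1024)  -> "3 MB"
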
@@ -155,7 +155,7 @@
         long diff = lw ^ rw;
 
         if (diff != 0) {
-          if (!CarbonUnsafe.ISLITTLEENDIAN) {
+          if (!CarbonUnsafe.IS_LITTLE_ENDIAN) {
             return lessThanUnsigned(lw, rw) ? -1 : 1;
           }
 
@@ -230,7 +230,7 @@
           long diff = lw ^ rw;
 
           if (diff != 0) {
-            if (!CarbonUnsafe.ISLITTLEENDIAN) {
+            if (!CarbonUnsafe.IS_LITTLE_ENDIAN) {
               return lessThanUnsigned(lw, rw) ? -1 : 1;
             }
 
@@ -351,7 +351,7 @@
   }
 
   /**
-   * Stirng => byte[]
+   * String => byte[]
    *
    * @param s
    * @return
@@ -443,7 +443,7 @@
     }
     short n = 0;
     if (CarbonUnsafe.getUnsafe() != null) {
-      if (CarbonUnsafe.ISLITTLEENDIAN) {
+      if (CarbonUnsafe.IS_LITTLE_ENDIAN) {
         n = Short.reverseBytes(
             CarbonUnsafe.getUnsafe().getShort(bytes, offset + CarbonUnsafe.BYTE_ARRAY_OFFSET));
       } else {
@@ -513,7 +513,7 @@
     }
     int n = 0;
     if (CarbonUnsafe.getUnsafe() != null) {
-      if (CarbonUnsafe.ISLITTLEENDIAN) {
+      if (CarbonUnsafe.IS_LITTLE_ENDIAN) {
         n = Integer.reverseBytes(
             CarbonUnsafe.getUnsafe().getInt(bytes, offset + CarbonUnsafe.BYTE_ARRAY_OFFSET));
       } else {
@@ -582,7 +582,7 @@
     }
     long l = 0;
     if (CarbonUnsafe.getUnsafe() != null) {
-      if (CarbonUnsafe.ISLITTLEENDIAN) {
+      if (CarbonUnsafe.IS_LITTLE_ENDIAN) {
         l = Long.reverseBytes(
             CarbonUnsafe.getUnsafe().getLong(bytes, offset + CarbonUnsafe.BYTE_ARRAY_OFFSET));
       } else {
@@ -626,7 +626,7 @@
               + " byte array");
     }
     if (CarbonUnsafe.getUnsafe() != null) {
-      if (CarbonUnsafe.ISLITTLEENDIAN) {
+      if (CarbonUnsafe.IS_LITTLE_ENDIAN) {
         val = Integer.reverseBytes(val);
       }
       CarbonUnsafe.getUnsafe().putInt(bytes, offset + CarbonUnsafe.BYTE_ARRAY_OFFSET, val);
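
The IS_LITTLE_ENDIAN checks exist because Unsafe reads primitives in the platform's native byte order while the stored bytes are big-endian, hence the Integer.reverseBytes calls. The same reversal, shown portably with ByteBuffer:

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    public class EndianSketch {
      public static void main(String[] args) {
        byte[] bytes = {0x00, 0x00, 0x00, 0x2A}; // the int 42, stored big-endian
        int big = ByteBuffer.wrap(bytes).order(ByteOrder.BIG_ENDIAN).getInt();
        // a native little-endian read sees the bytes reversed, so the value
        // must be flipped back, which is what Integer.reverseBytes does above
        int little = ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN).getInt();
        System.out.println(big + " == " + Integer.reverseBytes(little)); // 42 == 42
      }
    }
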
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsDummy.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsDummy.java
index a3a6a4e..42d7374 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsDummy.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsDummy.java
@@ -40,7 +40,7 @@
   }
 
   @Override
-  public void recordLoadCsvfilesToDfTime() {
+  public void recordLoadCsvFilesToDfTime() {
 
   }
 
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsImpl.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsImpl.java
index ed06c73..7624cfc 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsImpl.java
@@ -24,7 +24,7 @@
 import org.apache.log4j.Logger;
 
 /**
- * A util which provide methods used to record time information druing data loading.
+ * A util which provides methods used to record time information during data loading.
  */
 public class CarbonLoadStatisticsImpl implements LoadStatistics {
   private CarbonLoadStatisticsImpl() {
@@ -46,8 +46,8 @@
    *threads, who does the same thing, LET - EST is the cost time of doing one thing using
    *multiple thread.
  */
-  private long loadCsvfilesToDfStartTime = 0;
-  private long loadCsvfilesToDfCostTime = 0;
+  private long loadCsvFilesToDfStartTime = 0;
+  private long loadCsvFilesToDfCostTime = 0;
   private long dicShuffleAndWriteFileTotalStartTime = 0;
 
   //LRU cache load one time
@@ -100,13 +100,13 @@
     }
   }
 
-  public void recordLoadCsvfilesToDfTime() {
-    long loadCsvfilesToDfTimePoint = System.currentTimeMillis();
-    if (0 == loadCsvfilesToDfStartTime) {
-      loadCsvfilesToDfStartTime = loadCsvfilesToDfTimePoint;
+  public void recordLoadCsvFilesToDfTime() {
+    long loadCsvFilesToDfTimePoint = System.currentTimeMillis();
+    if (0 == loadCsvFilesToDfStartTime) {
+      loadCsvFilesToDfStartTime = loadCsvFilesToDfTimePoint;
     }
-    if (loadCsvfilesToDfTimePoint - loadCsvfilesToDfStartTime > loadCsvfilesToDfCostTime) {
-      loadCsvfilesToDfCostTime = loadCsvfilesToDfTimePoint - loadCsvfilesToDfStartTime;
+    if (loadCsvFilesToDfTimePoint - loadCsvFilesToDfStartTime > loadCsvFilesToDfCostTime) {
+      loadCsvFilesToDfCostTime = loadCsvFilesToDfTimePoint - loadCsvFilesToDfStartTime;
     }
   }
 
@@ -228,8 +228,8 @@
     this.totalRecords = totalRecords;
   }
 
-  private double getLoadCsvfilesToDfTime() {
-    return loadCsvfilesToDfCostTime / 1000.0;
+  private double getLoadCsvFilesToDfTime() {
+    return loadCsvFilesToDfCostTime / 1000.0;
   }
 
   private double getDictionaryValuesTotalTime(String partitionID) {
@@ -288,7 +288,7 @@
   }
 
   private double getTotalTime(String partitionID) {
-    this.totalTime = getLoadCsvfilesToDfTime() +
+    this.totalTime = getLoadCsvFilesToDfTime() +
         getLruCacheLoadTime() + getDictionaryValuesTotalTime(partitionID) +
         getDictionaryValue2MdkAdd2FileTime(partitionID);
     return totalTime;
@@ -296,9 +296,9 @@
 
   //Print the statistics information
   private void printDicGenStatisticsInfo() {
-    double loadCsvfilesToDfTime = getLoadCsvfilesToDfTime();
+    double loadCsvFilesToDfTime = getLoadCsvFilesToDfTime();
     LOGGER.info("STAGE 1 ->Load csv to DataFrame and generate" +
-            " block distinct values: " + loadCsvfilesToDfTime + "(s)");
+            " block distinct values: " + loadCsvFilesToDfTime + "(s)");
   }
 
   private void printLruCacheLoadTimeInfo() {
@@ -377,8 +377,8 @@
 
   //Reset the load statistics values
   private void resetLoadStatistics() {
-    loadCsvfilesToDfStartTime = 0;
-    loadCsvfilesToDfCostTime = 0;
+    loadCsvFilesToDfStartTime = 0;
+    loadCsvFilesToDfCostTime = 0;
     dicShuffleAndWriteFileTotalStartTime = 0;
     lruCacheLoadTime = 0;
     totalRecords = 0;
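
recordLoadCsvFilesToDfTime keeps the largest elapsed time across concurrent loaders: the first call pins the start time and each later call widens the recorded cost. The pattern in isolation (a reduced sketch, not the full class):

    final class MaxElapsedSketch {
      private long startTime = 0;
      private long costTime = 0;

      // call once per thread when its CSV-to-DataFrame step finishes
      void record() {
        long now = System.currentTimeMillis();
        if (startTime == 0) {
          startTime = now;            // first caller pins the start
        }
        if (now - startTime > costTime) {
          costTime = now - startTime; // keep the largest elapsed time seen
        }
      }
    }
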
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonMetadataUtil.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonMetadataUtil.java
index d1bef1a..441130d 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonMetadataUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonMetadataUtil.java
@@ -56,7 +56,7 @@
 import org.apache.log4j.Logger;
 
 /**
- * Util class to convert to thrift metdata classes
+ * Util class to convert to thrift metadata classes
  */
 public class CarbonMetadataUtil {
 
@@ -93,13 +93,13 @@
    * Below method prepares the file footer object for carbon data file version 3
    *
    * @param infoList
-   * @param blockletIndexs
+   * @param blockletIndexes
    * @param numberOfColumns
    * @return FileFooter
    */
   public static FileFooter3 convertFileFooterVersion3(List<BlockletInfo3> infoList,
-      List<BlockletIndex> blockletIndexs, int numberOfColumns) {
-    FileFooter3 footer = getFileFooter3(infoList, blockletIndexs, numberOfColumns);
+      List<BlockletIndex> blockletIndexes, int numberOfColumns) {
+    FileFooter3 footer = getFileFooter3(infoList, blockletIndexes, numberOfColumns);
     for (BlockletInfo3 info : infoList) {
       footer.addToBlocklet_info_list3(info);
     }
@@ -110,19 +110,19 @@
    * Below method will be used to get the file footer object
    *
    * @param infoList         blocklet info
-   * @param blockletIndexs
+   * @param blockletIndexes
    * @param numberOfColumns
    * @return file footer
    */
   private static FileFooter3 getFileFooter3(List<BlockletInfo3> infoList,
-      List<BlockletIndex> blockletIndexs, int numberOfColumns) {
+      List<BlockletIndex> blockletIndexes, int numberOfColumns) {
     SegmentInfo segmentInfo = new SegmentInfo();
     segmentInfo.setNum_cols(numberOfColumns);
     segmentInfo.setColumn_cardinalities(dummyCardinality);
     FileFooter3 footer = new FileFooter3();
     footer.setNum_rows(getNumberOfRowForFooter(infoList));
     footer.setSegment_info(segmentInfo);
-    for (BlockletIndex info : blockletIndexs) {
+    for (BlockletIndex info : blockletIndexes) {
       footer.addToBlocklet_index_list(info);
     }
     return footer;
@@ -409,7 +409,7 @@
       blockIndex.setFile_name(blockIndexInfo.getFileName());
       blockIndex.setBlock_index(getBlockletIndex(blockIndexInfo.getBlockletIndex()));
       if (blockIndexInfo.getBlockletInfo() != null) {
-        blockIndex.setBlocklet_info(getBlocletInfo3(blockIndexInfo.getBlockletInfo()));
+        blockIndex.setBlocklet_info(getBlockletInfo3(blockIndexInfo.getBlockletInfo()));
       }
       blockIndex.setFile_size(blockIndexInfo.getFileSize());
       thriftBlockIndexList.add(blockIndex);
@@ -417,7 +417,7 @@
     return thriftBlockIndexList;
   }
 
-  public static BlockletInfo3 getBlocletInfo3(
+  public static BlockletInfo3 getBlockletInfo3(
       org.apache.carbondata.core.metadata.blocklet.BlockletInfo blockletInfo) {
     List<Long> dimensionChunkOffsets = blockletInfo.getDimensionChunkOffsets();
     dimensionChunkOffsets.addAll(blockletInfo.getMeasureChunkOffsets());
@@ -462,7 +462,7 @@
   }
 
   /**
-   * return DataChunk3 for the dimension column (specifed by `columnIndex`)
+   * return DataChunk3 for the dimension column (specified by `columnIndex`)
    * in `encodedTablePageList`
    */
   public static DataChunk3 getDimensionDataChunk3(EncodedBlocklet encodedBlocklet,
@@ -479,7 +479,7 @@
   }
 
   /**
-   * return DataChunk3 for the measure column (specifed by `columnIndex`)
+   * return DataChunk3 for the measure column (specified by `columnIndex`)
    * in `encodedTablePageList`
    */
   public static DataChunk3 getMeasureDataChunk3(EncodedBlocklet encodedBlocklet, int columnIndex) {
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
index cc1af6b..5b1b2cf 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
@@ -86,7 +86,7 @@
   /**
    * class instance.
    */
-  private static final CarbonProperties CARBONPROPERTIESINSTANCE = new CarbonProperties();
+  private static final CarbonProperties INSTANCE = new CarbonProperties();
 
   /**
    * Properties
@@ -116,7 +116,7 @@
    * @return carbon properties instance
    */
   public static CarbonProperties getInstance() {
-    return CARBONPROPERTIESINSTANCE;
+    return INSTANCE;
   }
 
   /**
@@ -690,23 +690,23 @@
   private void validateNumberOfColumnPerIORead() {
     String numberOfColumnPerIOString = carbonProperties
         .getProperty(NUMBER_OF_COLUMN_TO_READ_IN_IO,
-            CarbonV3DataFormatConstants.NUMBER_OF_COLUMN_TO_READ_IN_IO_DEFAULTVALUE);
+            CarbonV3DataFormatConstants.NUMBER_OF_COLUMN_TO_READ_IN_IO_DEFAULT_VALUE);
     try {
       short numberOfColumnPerIO = Short.parseShort(numberOfColumnPerIOString);
       if (numberOfColumnPerIO < CarbonV3DataFormatConstants.NUMBER_OF_COLUMN_TO_READ_IN_IO_MIN
           || numberOfColumnPerIO > CarbonV3DataFormatConstants.NUMBER_OF_COLUMN_TO_READ_IN_IO_MAX) {
         LOGGER.info("The Number Of pages per blocklet column value \"" + numberOfColumnPerIOString
             + "\" is invalid. Using the default value \""
-            + CarbonV3DataFormatConstants.NUMBER_OF_COLUMN_TO_READ_IN_IO_DEFAULTVALUE);
+            + CarbonV3DataFormatConstants.NUMBER_OF_COLUMN_TO_READ_IN_IO_DEFAULT_VALUE);
         carbonProperties.setProperty(NUMBER_OF_COLUMN_TO_READ_IN_IO,
-            CarbonV3DataFormatConstants.NUMBER_OF_COLUMN_TO_READ_IN_IO_DEFAULTVALUE);
+            CarbonV3DataFormatConstants.NUMBER_OF_COLUMN_TO_READ_IN_IO_DEFAULT_VALUE);
       }
     } catch (NumberFormatException e) {
       LOGGER.info("The Number Of pages per blocklet column value \"" + numberOfColumnPerIOString
           + "\" is invalid. Using the default value \""
-          + CarbonV3DataFormatConstants.NUMBER_OF_COLUMN_TO_READ_IN_IO_DEFAULTVALUE);
+          + CarbonV3DataFormatConstants.NUMBER_OF_COLUMN_TO_READ_IN_IO_DEFAULT_VALUE);
       carbonProperties.setProperty(NUMBER_OF_COLUMN_TO_READ_IN_IO,
-          CarbonV3DataFormatConstants.NUMBER_OF_COLUMN_TO_READ_IN_IO_DEFAULTVALUE);
+          CarbonV3DataFormatConstants.NUMBER_OF_COLUMN_TO_READ_IN_IO_DEFAULT_VALUE);
     }
   }
 
@@ -1031,7 +1031,7 @@
   }
 
   /**
-   * gettting the unmerged segment numbers to be merged.
+   * getting the unmerged segment numbers to be merged.
    *
    * @return corrected value of unmerged segments to be merged
    */
@@ -1182,7 +1182,7 @@
       try {
         batchSize = Integer.parseInt(batchSizeString);
       } catch (NumberFormatException ne) {
-        LOGGER.error("Invalid inmemory records size. Using default value");
+        LOGGER.error("Invalid in-memory records size. Using default value");
         batchSize = CarbonCommonConstants.DETAIL_QUERY_BATCH_SIZE_DEFAULT;
       }
     } else {
@@ -1250,7 +1250,7 @@
   }
 
   /**
-   * Returns configured update deleta files value for IUD compaction
+   * Returns configured update delta files value for IUD compaction
    *
    * @return numberOfDeltaFilesThreshold
    */
@@ -1280,7 +1280,7 @@
   }
 
   /**
-   * Returns configured delete deleta files value for IUD compaction
+   * Returns configured delete delta files value for IUD compaction
    *
    * @return numberOfDeltaFilesThreshold
    */
@@ -1889,7 +1889,7 @@
               CarbonCommonConstants.CARBON_DRIVER_PRUNING_MULTI_THREAD_ENABLE_FILES_COUNT,
               CarbonCommonConstants.CARBON_DRIVER_PRUNING_MULTI_THREAD_ENABLE_FILES_COUNT_DEFAULT));
       if (driverPruningMultiThreadEnableFilesCount <= 0) {
-        LOGGER.info("The driver prunning multithread enable files count value \""
+        LOGGER.info("The driver pruning multi-thread enable files count value \""
             + driverPruningMultiThreadEnableFilesCount
             + "\" is invalid. Using the default value \""
             + CarbonCommonConstants.CARBON_DRIVER_PRUNING_MULTI_THREAD_ENABLE_FILES_COUNT_DEFAULT);
@@ -1897,7 +1897,7 @@
             .CARBON_DRIVER_PRUNING_MULTI_THREAD_ENABLE_FILES_COUNT_DEFAULT);
       }
     } catch (NumberFormatException e) {
-      LOGGER.info("The driver prunning multithread enable files count value " +
+      LOGGER.info("The driver pruning multi-thread enable files count value " +
               "is invalid. Using the default value \""
           + CarbonCommonConstants.CARBON_DRIVER_PRUNING_MULTI_THREAD_ENABLE_FILES_COUNT_DEFAULT);
       driverPruningMultiThreadEnableFilesCount = Integer.parseInt(CarbonCommonConstants
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
index 55864aa..9643643 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
@@ -288,8 +288,8 @@
     return newDimsC;
   }
 
-  private static int getBitLengthFullyFilled(int dimlens) {
-    int bitsLength = Long.toBinaryString(dimlens).length();
+  private static int getBitLengthFullyFilled(int dimensionLens) {
+    int bitsLength = Long.toBinaryString(dimensionLens).length();
     int div = bitsLength / 8;
     int mod = bitsLength % 8;
     if (mod > 0) {
@@ -513,12 +513,12 @@
    * @param currentIndex
    * @param dimColumnDataChunk
    * @param compareValue
-   * @param numerOfRows
+   * @param numberOfRows
    * @return index value
    */
   public static int nextGreaterValueToTarget(int currentIndex,
-      DimensionColumnPage dimColumnDataChunk, byte[] compareValue, int numerOfRows) {
-    while (currentIndex + 1 < numerOfRows
+      DimensionColumnPage dimColumnDataChunk, byte[] compareValue, int numberOfRows) {
+    while (currentIndex + 1 < numberOfRows
         && dimColumnDataChunk.compareTo(currentIndex + 1, compareValue) <= 0) {
       ++currentIndex;
     }
@@ -866,14 +866,14 @@
   }
 
   /**
-   * Below method will be used to read the data file matadata
+   * Below method will be used to read the data file metadata
    */
   public static DataFileFooter readMetadataFile(TableBlockInfo tableBlockInfo) throws IOException {
     return getDataFileFooter(tableBlockInfo, false);
   }
 
   /**
-   * Below method will be used to read the data file matadata
+   * Below method will be used to read the data file metadata
    *
    * @param tableBlockInfo
    * @param forceReadDataFileFooter flag to decide whether to read the footer of
@@ -1008,9 +1008,9 @@
   public static boolean[] identifyDimensionType(List<CarbonDimension> tableDimensionList) {
     List<Boolean> isDictionaryDimensions = new ArrayList<Boolean>();
     for (CarbonDimension carbonDimension : tableDimensionList) {
-      List<CarbonDimension> childs = carbonDimension.getListOfChildDimensions();
-      //assuming complex dimensions will always be atlast
-      if (null != childs && childs.size() > 0) {
+      List<CarbonDimension> children = carbonDimension.getListOfChildDimensions();
+      // assuming complex dimensions will always be at the end
+      if (null != children && children.size() > 0) {
         break;
       }
       if (carbonDimension.getDataType() == DataTypes.DATE) {
@@ -1112,7 +1112,7 @@
       // ensure that if the dimensions columnName is same as the block columnName and the dimension
       // columnId is the same as dimensions columnName then it's a valid column to be scanned.
       if (dimensionToBeSearched.getColumnId().equalsIgnoreCase(blockDimension.getColumnId())
-          || blockDimension.isColmatchBasedOnId(dimensionToBeSearched)) {
+          || blockDimension.isColumnMatchBasedOnId(dimensionToBeSearched)) {
         currentBlockDimension = blockDimension;
         break;
       }
@@ -1135,7 +1135,7 @@
       // ensure that if the measures columnName is same as the block columnName and the measures
       // columnId is the same as measures columnName then it's a valid column to be scanned.
       if (measureToBeSearched.getColumnId().equalsIgnoreCase(blockMeasure.getColumnId())
-          || blockMeasure.isColmatchBasedOnId(measureToBeSearched)) {
+          || blockMeasure.isColumnMatchBasedOnId(measureToBeSearched)) {
         currentBlockMeasure = blockMeasure;
         break;
       }
@@ -1146,20 +1146,20 @@
   public static List<ColumnSchema> getColumnSchemaList(List<CarbonDimension> carbonDimensionsList,
       List<CarbonMeasure> carbonMeasureList) {
     List<ColumnSchema> wrapperColumnSchemaList = new ArrayList<ColumnSchema>();
-    fillCollumnSchemaListForComplexDims(carbonDimensionsList, wrapperColumnSchemaList);
+    fillColumnSchemaListForComplexDims(carbonDimensionsList, wrapperColumnSchemaList);
     for (CarbonMeasure carbonMeasure : carbonMeasureList) {
       wrapperColumnSchemaList.add(carbonMeasure.getColumnSchema());
     }
     return wrapperColumnSchemaList;
   }
 
-  private static void fillCollumnSchemaListForComplexDims(
+  private static void fillColumnSchemaListForComplexDims(
       List<CarbonDimension> carbonDimensionsList, List<ColumnSchema> wrapperColumnSchemaList) {
     for (CarbonDimension carbonDimension : carbonDimensionsList) {
       wrapperColumnSchemaList.add(carbonDimension.getColumnSchema());
       List<CarbonDimension> childDims = carbonDimension.getListOfChildDimensions();
       if (null != childDims && childDims.size() > 0) {
-        fillCollumnSchemaListForComplexDims(childDims, wrapperColumnSchemaList);
+        fillColumnSchemaListForComplexDims(childDims, wrapperColumnSchemaList);
       }
     }
   }
@@ -1351,13 +1351,13 @@
     if (values == null || values.isEmpty()) {
       return "";
     }
-    StringBuilder segmentStringbuilder = new StringBuilder();
+    StringBuilder segmentStringBuilder = new StringBuilder();
     for (int i = 0; i < values.size() - 1; i++) {
-      segmentStringbuilder.append(values.get(i));
-      segmentStringbuilder.append(",");
+      segmentStringBuilder.append(values.get(i));
+      segmentStringBuilder.append(",");
     }
-    segmentStringbuilder.append(values.get(values.size() - 1));
-    return segmentStringbuilder.toString();
+    segmentStringBuilder.append(values.get(values.size() - 1));
+    return segmentStringBuilder.toString();
   }
 
   /**
@@ -1587,13 +1587,13 @@
 
   /**
    * @param invalidBlockVOForSegmentId
-   * @param updateStatusMngr
+   * @param updateStatusManager
    * @return
    */
   public static boolean isInvalidTableBlock(String segmentId, String filePath,
-      UpdateVO invalidBlockVOForSegmentId, SegmentUpdateStatusManager updateStatusMngr) {
+      UpdateVO invalidBlockVOForSegmentId, SegmentUpdateStatusManager updateStatusManager) {
 
-    if (!updateStatusMngr.isBlockValid(segmentId,
+    if (!updateStatusManager.isBlockValid(segmentId,
         CarbonTablePath.getCarbonDataFileName(filePath) +
             CarbonTablePath.getCarbonDataExtension())) {
       return true;
@@ -1614,7 +1614,7 @@
           && blockTimeStamp > invalidBlockVOForSegmentId.getLatestUpdateTimestamp()) {
         return true;
       }
-      // for 1st time starttime stamp will be empty so need to consider fact time stamp.
+      // for the first time, the start timestamp will be empty, so we need to consider the fact timestamp.
       if (null == invalidBlockVOForSegmentId.getUpdateDeltaStartTimestamp()
           && blockTimeStamp > invalidBlockVOForSegmentId.getFactTimestamp()) {
         return true;
@@ -1785,29 +1785,29 @@
   }
 
   /**
-   * Converts Tableinfo object to json multi string objects of size 4000
+   * Converts TableInfo object to json multi string objects of size 4000
    * @param tableInfo
-   * @param seperator separator between each string
+   * @param separator separator between each string
    * @param quote Quote to be used for string
    * @param prefix Prefix to be added before generated string
    * @return
    */
-  public static String convertToMultiGsonStrings(TableInfo tableInfo, String seperator,
+  public static String convertToMultiGsonStrings(TableInfo tableInfo, String separator,
       String quote, String prefix) {
     Gson gson = new Gson();
     String schemaString = gson.toJson(tableInfo);
-    return splitSchemaStringToMultiString(seperator, quote, prefix, schemaString);
+    return splitSchemaStringToMultiString(separator, quote, prefix, schemaString);
   }
 
   /**
    * Converts Json String to multi string objects of size 4000
    * @param schemaString Json string
-   * @param seperator separator between each string
+   * @param separator separator between each string
    * @param quote Quote to be used for string
    * @param prefix Prefix to be added before generated string
    * @return
    */
-  public static String splitSchemaStringToMultiString(String seperator, String quote,
+  public static String splitSchemaStringToMultiString(String separator, String quote,
       String prefix, String schemaString) {
     int schemaLen = schemaString.length();
     int splitLen = 4000;
@@ -1817,7 +1817,7 @@
     }
     StringBuilder builder =
         new StringBuilder(prefix).append(quote).append("carbonSchemaPartsNo").append(quote)
-            .append(seperator).append("'").append(parts).append("',");
+            .append(separator).append("'").append(parts).append("',");
     int runningLen = 0;
     int endLen = schemaLen > splitLen ? splitLen : schemaLen;
     for (int i = 0; i < parts; i++) {
@@ -1826,7 +1826,7 @@
           endLen = schemaLen % splitLen;
         }
       }
-      builder.append(quote).append("carbonSchema").append(i).append(quote).append(seperator);
+      builder.append(quote).append("carbonSchema").append(i).append(quote).append(separator);
       builder.append("'").append(schemaString.substring(runningLen, runningLen + endLen))
           .append("'");
       if (i < parts - 1) {
@@ -1838,7 +1838,7 @@
   }
 
   /**
-   * Converts Tableinfo object to json multi string objects  of size 4000 and stored in map
+   * Converts TableInfo object to json multi string objects of size 4000 and stores them in a map
    * @param tableInfo
    * @return
    */
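
The splitting above chops the schema JSON into at most 4000-character pieces, presumably so each piece fits within external property-size limits. The core loop, reduced to its essentials (method and class names invented):

    import java.util.ArrayList;
    import java.util.List;

    final class SchemaSplitSketch {
      static List<String> splitIntoParts(String schema, int splitLen) {
        List<String> parts = new ArrayList<>();
        for (int offset = 0; offset < schema.length(); offset += splitLen) {
          parts.add(schema.substring(offset,
              Math.min(offset + splitLen, schema.length())));
        }
        return parts; // splitIntoParts("abcdef", 4) -> ["abcd", "ef"]
      }
    }
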
@@ -1901,17 +1901,17 @@
     Gson gson = gsonBuilder.create();
     TableInfo tableInfo = gson.fromJson(builder.toString(), TableInfo.class);
 
-    // The tableInfo is deserialized from GSON string, need to update the scale and
+    // The tableInfo is deserialized from GSON string, need to update the scale and
     // precision if there are any decimal field, because DecimalType is added in Carbon 1.3,
-    // If it is not updated, read compactibility will be break for table generated before Carbon 1.3
+    // If it is not updated, read compatibility will break for tables generated before Carbon 1.3
     updateDecimalType(tableInfo);
     return tableInfo;
   }
 
   // Update decimal type inside `tableInfo` to set scale and precision, if there are any decimal
   private static void updateDecimalType(TableInfo tableInfo) {
-    List<ColumnSchema> deserializedColumns = tableInfo.getFactTable().getListOfColumns();
-    for (ColumnSchema column : deserializedColumns) {
+    List<ColumnSchema> deserializeColumns = tableInfo.getFactTable().getListOfColumns();
+    for (ColumnSchema column : deserializeColumns) {
       DataType dataType = column.getDataType();
       if (DataTypes.isDecimal(dataType)) {
         column.setDataType(DataTypes.createDecimalType(column.getPrecision(), column.getScale()));
@@ -2008,10 +2008,10 @@
   }
 
   static List<ParentColumnTableRelation> fromThriftToWrapperParentTableColumnRelations(
-      List<org.apache.carbondata.format.ParentColumnTableRelation> thirftParentColumnRelation) {
+      List<org.apache.carbondata.format.ParentColumnTableRelation> thriftParentColumnRelation) {
     List<ParentColumnTableRelation> parentColumnTableRelationList = new ArrayList<>();
     for (org.apache.carbondata.format.ParentColumnTableRelation carbonTableRelation :
-        thirftParentColumnRelation) {
+        thriftParentColumnRelation) {
       RelationIdentifier relationIdentifier =
           new RelationIdentifier(carbonTableRelation.getRelationIdentifier().getDatabaseName(),
               carbonTableRelation.getRelationIdentifier().getTableName(),
@@ -2094,7 +2094,7 @@
     CarbonFile[] dataFiles = segment.listFiles();
     CarbonFile latestCarbonFile = null;
     long latestDatafileTimestamp = 0L;
-    // get the latest carbondatafile to get the latest schema in the folder
+    // get the latest carbon data file to get the latest schema in the folder
     for (CarbonFile dataFile : dataFiles) {
       if (dataFile.getName().endsWith(CarbonCommonConstants.FACT_FILE_EXT)
           && dataFile.getLastModifiedTime() > latestDatafileTimestamp) {
@@ -2229,11 +2229,11 @@
 
     SchemaEvolutionEntry schemaEvolutionEntry = new SchemaEvolutionEntry();
     schemaEvolutionEntry.setTimeStamp(System.currentTimeMillis());
-    SchemaEvolution schemaEvol = new SchemaEvolution();
+    SchemaEvolution schemaEvolution = new SchemaEvolution();
     List<SchemaEvolutionEntry> schEntryList = new ArrayList<>();
     schEntryList.add(schemaEvolutionEntry);
-    schemaEvol.setSchemaEvolutionEntryList(schEntryList);
-    tableSchema.setSchemaEvolution(schemaEvol);
+    schemaEvolution.setSchemaEvolutionEntryList(schEntryList);
+    tableSchema.setSchemaEvolution(schemaEvolution);
     return tableSchema;
   }
 
@@ -2370,8 +2370,8 @@
       Boolean updateSize)
       throws IOException {
     Map<String, Long> dataIndexSizeMap = new HashMap<String, Long>();
-    long dataSize = 0L;
-    long indexSize = 0L;
+    long totalDataSize = 0L;
+    long totalIndexSize = 0L;
     long lastUpdateTime = 0L;
     boolean needUpdate = false;
     AbsoluteTableIdentifier identifier = carbonTable.getAbsoluteTableIdentifier();
@@ -2387,7 +2387,7 @@
           lockAcquired = carbonLock.lockWithRetries();
         }
         if (lockAcquired) {
-          LOGGER.debug("Acquired lock for table for table status updation");
+          LOGGER.debug("Acquired lock for table for table status update");
           String metadataPath = carbonTable.getMetadataPath();
           LoadMetadataDetails[] loadMetadataDetails =
               SegmentStatusManager.readLoadMetadata(metadataPath);
@@ -2396,21 +2396,21 @@
             SegmentStatus loadStatus = loadMetadataDetail.getSegmentStatus();
             if (loadStatus == SegmentStatus.SUCCESS || loadStatus ==
                       SegmentStatus.LOAD_PARTIAL_SUCCESS) {
-              String dsize = loadMetadataDetail.getDataSize();
-              String isize = loadMetadataDetail.getIndexSize();
+              String dataSize = loadMetadataDetail.getDataSize();
+              String indexSize = loadMetadataDetail.getIndexSize();
               // If it is old segment, need to calculate data size and index size again
-              if (null == dsize || null == isize) {
+              if (null == dataSize || null == indexSize) {
                 needUpdate = true;
                 LOGGER.debug("It is an old segment, need calculate data size and index size again");
                 HashMap<String, Long> map = CarbonUtil.getDataSizeAndIndexSize(
                     identifier.getTablePath(), loadMetadataDetail.getLoadName());
-                dsize = String.valueOf(map.get(CarbonCommonConstants.CARBON_TOTAL_DATA_SIZE));
-                isize = String.valueOf(map.get(CarbonCommonConstants.CARBON_TOTAL_INDEX_SIZE));
-                loadMetadataDetail.setDataSize(dsize);
-                loadMetadataDetail.setIndexSize(isize);
+                dataSize = String.valueOf(map.get(CarbonCommonConstants.CARBON_TOTAL_DATA_SIZE));
+                indexSize = String.valueOf(map.get(CarbonCommonConstants.CARBON_TOTAL_INDEX_SIZE));
+                loadMetadataDetail.setDataSize(dataSize);
+                loadMetadataDetail.setIndexSize(indexSize);
               }
-              dataSize += Long.parseLong(dsize);
-              indexSize += Long.parseLong(isize);
+              totalDataSize += Long.parseLong(dataSize);
+              totalIndexSize += Long.parseLong(indexSize);
             }
           }
           // If it contains old segment, write new load details
@@ -2426,22 +2426,22 @@
                 FileFactory.getCarbonFile(tableStatusPath).getLastModifiedTime();
           }
           if (!FileFactory.isFileExist(metadataPath)) {
-            dataSize = FileFactory.getDirectorySize(carbonTable.getTablePath());
+            totalDataSize = FileFactory.getDirectorySize(carbonTable.getTablePath());
           }
           dataIndexSizeMap
-              .put(String.valueOf(CarbonCommonConstants.CARBON_TOTAL_DATA_SIZE), dataSize);
+              .put(String.valueOf(CarbonCommonConstants.CARBON_TOTAL_DATA_SIZE), totalDataSize);
           dataIndexSizeMap
-              .put(String.valueOf(CarbonCommonConstants.CARBON_TOTAL_INDEX_SIZE), indexSize);
+              .put(String.valueOf(CarbonCommonConstants.CARBON_TOTAL_INDEX_SIZE), totalIndexSize);
           dataIndexSizeMap
               .put(String.valueOf(CarbonCommonConstants.LAST_UPDATE_TIME), lastUpdateTime);
         } else {
-          LOGGER.error("Not able to acquire the lock for Table status updation for table");
+          LOGGER.error("Not able to acquire the lock for Table status update for table");
         }
       } finally {
         if (carbonLock.unlock()) {
-          LOGGER.debug("Table unlocked successfully after table status updation");
+          LOGGER.debug("Table unlocked successfully after table status update");
         } else {
-          LOGGER.error("Unable to unlock Table lock for table during table status updation");
+          LOGGER.error("Unable to unlock Table lock for table during table status update");
         }
       }
     }
@@ -2521,7 +2521,7 @@
     if (locationMap != null) {
       fileStore.readIndexFiles(FileFactory.getConfiguration());
       Map<String, List<String>> indexFilesMap = fileStore.getIndexFilesMap();
-      // get the size of carbonindex file
+      // get the size of carbon index file
       carbonIndexSize = getCarbonIndexSize(fileStore, locationMap);
       for (Map.Entry<String, List<String>> entry : indexFilesMap.entrySet()) {
         // get the size of carbondata files
@@ -2543,7 +2543,7 @@
   }
 
   /**
-   * Calcuate the index files size of the segment
+   * Calculate the index files size of the segment
    *
    * @param fileStore
    * @param locationMap
@@ -2554,7 +2554,7 @@
     long carbonIndexSize = 0L;
     for (Map.Entry<String, SegmentFileStore.FolderDetails> entry : locationMap.entrySet()) {
       SegmentFileStore.FolderDetails folderDetails = entry.getValue();
-      Set<String> carbonindexFiles = folderDetails.getFiles();
+      Set<String> indexFiles = folderDetails.getFiles();
       String mergeFileName = folderDetails.getMergeFileName();
       if (null != mergeFileName) {
         String mergeIndexPath;
@@ -2567,7 +2567,7 @@
         }
         carbonIndexSize += FileFactory.getCarbonFile(mergeIndexPath).getSize();
       }
-      for (String indexFile : carbonindexFiles) {
+      for (String indexFile : indexFiles) {
         String indexPath;
         if (entry.getValue().isRelative()) {
           indexPath =
@@ -2615,7 +2615,7 @@
   }
 
   /**
-   * Deoce
+   * Decode string to byte array
    * @param objectString
    * @return
    * @throws UnsupportedEncodingException
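Reviewer note: for readers of the fixed Javadoc, a minimal sketch of what a charset-based decode with this signature typically looks like (the helper name and the UTF-8 constant are assumptions, not the actual CarbonUtil code):

```java
import java.io.UnsupportedEncodingException;

public final class DecodeSketch {
  // Assumed to mirror a DEFAULT_CHARSET constant; not the real CarbonCommonConstants value check.
  private static final String DEFAULT_CHARSET = "UTF-8";

  /** Hypothetical stand-in for the documented decode method. */
  static byte[] decodeStringToBytes(String objectString) throws UnsupportedEncodingException {
    // String.getBytes(String) declares UnsupportedEncodingException,
    // which matches the @throws clause in the Javadoc above.
    return objectString.getBytes(DEFAULT_CHARSET);
  }

  public static void main(String[] args) throws UnsupportedEncodingException {
    System.out.println(decodeStringToBytes("segment_0").length); // 9
  }
}
```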
@@ -2765,7 +2765,7 @@
   }
 
   /**
-   * Generate the blockid as per the block path
+   * Generate the blockId as per the block path
    *
    * @param identifier
    * @param filePath
@@ -2781,7 +2781,7 @@
   }
 
   /**
-   * Generate the blockid as per the block path
+   * Generate the blockId as per the block path
    *
    * @return
    */
@@ -3035,10 +3035,10 @@
       CarbonTable carbonTable) {
     List<ColumnSchema> wrapperColumnSchema = CarbonUtil
         .getColumnSchemaList(carbonTable.getVisibleDimensions(), carbonTable.getVisibleMeasures());
-    boolean islocalDictEnabled = carbonTable.isLocalDictionaryEnabled();
+    boolean isLocalDictEnabled = carbonTable.isLocalDictionaryEnabled();
     // creates a map only if local dictionary is enabled, else map will be null
     Map<String, LocalDictionaryGenerator> columnLocalDictGenMap = new HashMap<>();
-    if (islocalDictEnabled) {
+    if (isLocalDictEnabled) {
       int localDictionaryThreshold = carbonTable.getLocalDictionaryThreshold();
       for (ColumnSchema columnSchema : wrapperColumnSchema) {
         // check whether the column is local dictionary column or not
@@ -3051,7 +3051,7 @@
         }
       }
     }
-    if (islocalDictEnabled) {
+    if (isLocalDictEnabled) {
       if (LOGGER.isDebugEnabled()) {
         LOGGER.debug("Local dictionary is enabled for table: " + carbonTable.getTableUniqueName());
         LOGGER.debug(String.format("Local dictionary threshold for table %s is %d",
@@ -3086,7 +3086,7 @@
     ColumnarFormatVersion version = null;
     SegmentIndexFileStore fileStore = new SegmentIndexFileStore();
     CarbonProperties carbonProperties = CarbonProperties.getInstance();
-    // if the carbontable is support flat folder
+    // if the carbon table supports flat folder
     if (supportFlatFolder) {
       segmentPath = carbonTable.getTablePath();
       if (FileFactory.isFileExist(segmentPath)) {
@@ -3144,7 +3144,7 @@
         }
       }
       // if all valid segments path does not in the system,
-      // then the carbon file verion as default
+      // then use the default carbon file version
       if (version == null) {
         version = CarbonProperties.getInstance().getFormatVersion();
       }
@@ -3176,9 +3176,9 @@
   }
 
   /**
-   * Check whether it is standard table means tablepath has Fact/Part0/Segment_ tail present with
-   * all carbon files. In other cases carbon files present directly under tablepath or
-   * tablepath/partition folder
+   * Check whether it is a standard table, meaning the table path has the Fact/Part0/Segment_ tail
+   * present with all carbon files. In other cases carbon files are present directly under the
+   * table path or table path/partition folder
    * TODO Read segment file and corresponding index file to get the correct carbondata file instead
    * of using this way.
    * @param table
@@ -3190,11 +3190,11 @@
 
   /**
    * This method will form the FallbackEncodedColumnPage from input column page
-   * @param columnPage actual data column page got from encoded columnpage if decoder based fallback
-   * is disabled or newly created columnpage by extracting actual data from dictionary data, if
-   * decoder based fallback is enabled
+   * @param columnPage actual data column page obtained from the encoded column page if decoder
+   * based fallback is disabled, or a newly created column page built by extracting actual data
+   * from dictionary data if decoder based fallback is enabled
    * @param pageIndex pageIndex
-   * @param columnSpec ColumSpec
+   * @param columnSpec ColumnSpec
    * @return FallbackEncodedColumnPage
    * @throws IOException
    */
@@ -3254,7 +3254,7 @@
 
   /**
    * Below method is to generateUUID (Random Based)
-   * later it will be extened for TimeBased,NameBased
+   * later it will be extended for TimeBased, NameBased
    *
    * @return UUID as String
    */
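Reviewer note: for context on the corrected comment, random-based generation is what java.util.UUID already provides, and a name-based variant is one standard form the comment anticipates. An illustration, not CarbonData code:

```java
import java.nio.charset.StandardCharsets;
import java.util.UUID;

public final class UuidSketch {
  public static void main(String[] args) {
    // Random-based (version 4) UUID, matching the current behavior:
    String randomBased = UUID.randomUUID().toString();

    // Name-based (version 3) UUID, one possible future extension:
    String nameBased = UUID.nameUUIDFromBytes(
        "db1.table1".getBytes(StandardCharsets.UTF_8)).toString();

    System.out.println(randomBased);
    System.out.println(nameBased);
  }
}
```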
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CustomIndex.java b/core/src/main/java/org/apache/carbondata/core/util/CustomIndex.java
index 36d0a79..fb60051 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CustomIndex.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CustomIndex.java
@@ -28,7 +28,7 @@
  * An Index implementation class must extend this class and provide the concrete implementation
  * for following abstract methods:
  * 1. Init method to extract and store the sub-properties of index property. Such as index type,
- *    sourcecolumns etc.
+ *    source columns etc.
  * 2. Generate method to generate the row value for the index column from corresponding row values
  *    of its source columns.
  * 3. Query method to process the custom UDF filter queries based on source columns.
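Reviewer note: a skeletal subclass can make the three responsibilities in this class comment concrete. All signatures below are assumptions for illustration and may differ from the real CustomIndex API:

```java
import java.util.List;
import java.util.Map;

// Hypothetical skeleton; the real CustomIndex signatures may differ.
abstract class CustomIndexSketch<T> {
  abstract void init(String indexName, Map<String, String> properties) throws Exception;
  abstract T generate(List<?> sourceColumnValues) throws Exception;
  abstract List<Long[]> query(String query) throws Exception;
}

class GeoIndexSketch extends CustomIndexSketch<Long> {
  private int precision;

  @Override
  void init(String indexName, Map<String, String> properties) {
    // 1. extract and store sub-properties, e.g. index type and source columns
    this.precision = Integer.parseInt(properties.getOrDefault("precision", "10"));
  }

  @Override
  Long generate(List<?> sourceColumnValues) {
    // 2. derive the index column value from the source column values
    long lat = ((Number) sourceColumnValues.get(0)).longValue();
    long lon = ((Number) sourceColumnValues.get(1)).longValue();
    return (lat << precision) ^ lon; // toy encoding, for illustration only
  }

  @Override
  List<Long[]> query(String query) {
    // 3. translate a custom UDF filter into ranges on the index column
    return java.util.Collections.singletonList(new Long[]{0L, Long.MAX_VALUE});
  }
}
```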
diff --git a/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java b/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java
index 63b634d..3afc5da 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java
@@ -51,7 +51,7 @@
   private static final Logger LOGGER =
       LogServiceFactory.getLogService(DataTypeUtil.class.getName());
 
-  private static final ThreadLocal<DateFormat> timeStampformatter = new ThreadLocal<DateFormat>() {
+  private static final ThreadLocal<DateFormat> timestampFormatter = new ThreadLocal<DateFormat>() {
     @Override
     protected DateFormat initialValue() {
       DateFormat dateFormat = new SimpleDateFormat(CarbonProperties.getInstance()
@@ -62,7 +62,7 @@
     }
   };
 
-  private static final ThreadLocal<DateFormat> dateformatter = new ThreadLocal<DateFormat>() {
+  private static final ThreadLocal<DateFormat> dateFormatter = new ThreadLocal<DateFormat>() {
     @Override
     protected DateFormat initialValue() {
       return new SimpleDateFormat(CarbonProperties.getInstance()
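Reviewer note: the renamed fields also hint at why they exist, since SimpleDateFormat is not thread-safe and each thread therefore keeps its own instance in a ThreadLocal. A standalone sketch of the same pattern:

```java
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;

public final class ThreadLocalFormatterSketch {
  // One SimpleDateFormat per thread, since the class is not thread-safe.
  private static final ThreadLocal<DateFormat> TIMESTAMP_FORMATTER =
      ThreadLocal.withInitial(() -> new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"));

  public static void main(String[] args) {
    Runnable task = () ->
        System.out.println(Thread.currentThread().getName() + ": "
            + TIMESTAMP_FORMATTER.get().format(new Date()));
    new Thread(task, "t1").start();
    new Thread(task, "t2").start();
    // remove() resets the current thread's instance, mirroring clearFormatter()
    TIMESTAMP_FORMATTER.remove();
  }
}
```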
@@ -164,7 +164,7 @@
           dateFormatter = new SimpleDateFormat(timeStampFormat);
           dateFormatter.setLenient(false);
         } else {
-          dateFormatter = timeStampformatter.get();
+          dateFormatter = timestampFormatter.get();
         }
         dateToStr = dateFormatter.parse(dimValue);
         return dateToStr.getTime();
@@ -257,11 +257,11 @@
   public static byte[] bigDecimalToByte(BigDecimal num) {
     BigInteger sig = new BigInteger(num.unscaledValue().toString());
     int scale = num.scale();
-    byte[] bscale = { (byte) (scale) };
+    byte[] scaleBytes = { (byte) (scale) };
     byte[] buff = sig.toByteArray();
-    byte[] completeArr = new byte[buff.length + bscale.length];
-    System.arraycopy(bscale, 0, completeArr, 0, bscale.length);
-    System.arraycopy(buff, 0, completeArr, bscale.length, buff.length);
+    byte[] completeArr = new byte[buff.length + scaleBytes.length];
+    System.arraycopy(scaleBytes, 0, completeArr, 0, scaleBytes.length);
+    System.arraycopy(buff, 0, completeArr, scaleBytes.length, buff.length);
     return completeArr;
   }
 
@@ -273,9 +273,9 @@
    */
   public static BigDecimal byteToBigDecimal(byte[] raw) {
     int scale = (raw[0] & 0xFF);
-    byte[] unscale = new byte[raw.length - 1];
-    System.arraycopy(raw, 1, unscale, 0, unscale.length);
-    BigInteger sig = new BigInteger(unscale);
+    byte[] value = new byte[raw.length - 1];
+    System.arraycopy(raw, 1, value, 0, value.length);
+    BigInteger sig = new BigInteger(value);
     return new BigDecimal(sig, scale);
   }
 
@@ -287,9 +287,9 @@
    */
   public static BigDecimal byteToBigDecimal(byte[] raw, int offset, int length) {
     int scale = (raw[offset] & 0xFF);
-    byte[] unscale = new byte[length - 1];
-    System.arraycopy(raw, offset + 1, unscale, 0, unscale.length);
-    BigInteger sig = new BigInteger(unscale);
+    byte[] value = new byte[length - 1];
+    System.arraycopy(raw, offset + 1, value, 0, value.length);
+    BigInteger sig = new BigInteger(value);
     return new BigDecimal(sig, scale);
   }
 
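Reviewer note: the renames above make the byte layout easier to read, namely one scale byte followed by the unscaled value's two's-complement bytes. A self-contained round trip of that layout, mirroring the diff (note the single byte limits the scale to 0-255):

```java
import java.math.BigDecimal;
import java.math.BigInteger;

public final class BigDecimalBytesSketch {
  static byte[] bigDecimalToByte(BigDecimal num) {
    byte[] unscaled = num.unscaledValue().toByteArray();
    byte[] complete = new byte[unscaled.length + 1];
    complete[0] = (byte) num.scale(); // scale must fit in one byte (0-255)
    System.arraycopy(unscaled, 0, complete, 1, unscaled.length);
    return complete;
  }

  static BigDecimal byteToBigDecimal(byte[] raw) {
    int scale = raw[0] & 0xFF;
    byte[] value = new byte[raw.length - 1];
    System.arraycopy(raw, 1, value, 0, value.length);
    return new BigDecimal(new BigInteger(value), scale);
  }

  public static void main(String[] args) {
    BigDecimal in = new BigDecimal("12345.6789");
    System.out.println(byteToBigDecimal(bigDecimalToByte(in))); // 12345.6789
  }
}
```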
@@ -354,7 +354,7 @@
           return null;
         }
         try {
-          Date dateToStr = dateformatter.get().parse(data);
+          Date dateToStr = dateFormatter.get().parse(data);
           return dateToStr.getTime() * 1000;
         } catch (ParseException e) {
           LOGGER.error("Cannot convert value to Time/Long type value" + e.getMessage(), e);
@@ -365,7 +365,7 @@
           return null;
         }
         try {
-          Date dateToStr = timeStampformatter.get().parse(data);
+          Date dateToStr = timestampFormatter.get().parse(data);
           return dateToStr.getTime() * 1000;
         } catch (ParseException e) {
           LOGGER.error("Cannot convert value to Time/Long type value" + e.getMessage(), e);
@@ -411,7 +411,7 @@
           dateFormatter = new SimpleDateFormat(dateFormat);
           dateFormatter.setLenient(false);
         } else {
-          dateFormatter = timeStampformatter.get();
+          dateFormatter = timestampFormatter.get();
         }
         dateToStr = dateFormatter.parse(dimensionValue);
         return ByteUtil.toXorBytes(dateToStr.getTime());
@@ -446,7 +446,7 @@
           dateFormatter = new SimpleDateFormat(dateFormat);
           dateFormatter.setLenient(false);
         } else {
-          dateFormatter = timeStampformatter.get();
+          dateFormatter = timestampFormatter.get();
         }
         dateToStr = dateFormatter.parse(dimensionValue);
         return dateToStr.getTime();
@@ -694,7 +694,7 @@
           return null;
         }
         try {
-          Date dateToStr = dateformatter.get().parse(data5);
+          Date dateToStr = dateFormatter.get().parse(data5);
           return dateToStr.getTime() * 1000;
         } catch (ParseException e) {
           LOGGER.error("Cannot convert value to Time/Long type value" + e.getMessage(), e);
@@ -706,7 +706,7 @@
           return null;
         }
         try {
-          Date dateToStr = timeStampformatter.get().parse(data6);
+          Date dateToStr = timestampFormatter.get().parse(data6);
           return dateToStr.getTime() * 1000;
         } catch (ParseException e) {
           LOGGER.error("Cannot convert value to Time/Long type value" + e.getMessage(), e);
@@ -739,7 +739,7 @@
   }
 
   /**
-   * Below method will be used to basically to know whether any non parseable
+   * Below method is used to know whether any non-parsable
    * data is present or not. if present then return null so that system can
    * process to default null member value.
    *
@@ -908,8 +908,8 @@
               .getBytes(Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
         } else {
           try {
-            timeStampformatter.remove();
-            Date dateToStr = timeStampformatter.get().parse(data);
+            timestampFormatter.remove();
+            Date dateToStr = timestampFormatter.get().parse(data);
             return ByteUtil.toXorBytes(dateToStr.getTime());
           } catch (ParseException e) {
             LOGGER.error(
@@ -983,8 +983,8 @@
   public static void setDataTypeConverter(DataTypeConverter converterLocal) {
     if (converterLocal != null) {
       converter = converterLocal;
-      timeStampformatter.remove();
-      dateformatter.remove();
+      timestampFormatter.remove();
+      dateFormatter.remove();
     }
   }
 
@@ -992,8 +992,8 @@
    * As each load can have it's own time format. Reset the thread local for each load.
    */
   public static void clearFormatter() {
-    timeStampformatter.remove();
-    dateformatter.remove();
+    timestampFormatter.remove();
+    dateFormatter.remove();
   }
 
   public static DataTypeConverter getDataTypeConverter() {
diff --git a/core/src/main/java/org/apache/carbondata/core/util/DeleteLoadFolders.java b/core/src/main/java/org/apache/carbondata/core/util/DeleteLoadFolders.java
index b56d326..c95412c 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/DeleteLoadFolders.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/DeleteLoadFolders.java
@@ -182,7 +182,7 @@
       if (isForceDelete) {
         return true;
       }
-      long deletionTime = oneLoad.getModificationOrdeletionTimesStamp();
+      long deletionTime = oneLoad.getModificationOrDeletionTimestamp();
 
       return CarbonUpdateUtil.isMaxQueryTimeoutExceeded(deletionTime);
 
@@ -200,7 +200,7 @@
       if (isForceDelete) {
         return true;
       }
-      long deletionTime = oneLoad.getModificationOrdeletionTimesStamp();
+      long deletionTime = oneLoad.getModificationOrDeletionTimestamp();
 
       return CarbonUpdateUtil.isMaxQueryTimeoutExceeded(deletionTime);
 
diff --git a/core/src/main/java/org/apache/carbondata/core/util/LoadStatistics.java b/core/src/main/java/org/apache/carbondata/core/util/LoadStatistics.java
index cd2bb13..5b98746 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/LoadStatistics.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/LoadStatistics.java
@@ -24,7 +24,7 @@
   //Record the time
   void recordDicShuffleAndWriteTime();
 
-  void recordLoadCsvfilesToDfTime();
+  void recordLoadCsvFilesToDfTime();
 
   void recordDictionaryValuesTotalTime(String partitionID,
       Long dictionaryValuesTotalTimeTimePoint);
diff --git a/core/src/main/java/org/apache/carbondata/core/util/TaskMetricsMap.java b/core/src/main/java/org/apache/carbondata/core/util/TaskMetricsMap.java
index ff4299e..aa481cf 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/TaskMetricsMap.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/TaskMetricsMap.java
@@ -123,7 +123,7 @@
       if (null != callbackList) {
         for (CarbonFSBytesReadOnThreadCallback callback : callbackList) {
           if (callback.threadId == callbackThreadId) {
-            callback.updatedReadBytes += callback.readbytes();
+            callback.updatedReadBytes += callback.readBytes();
             break;
           }
         }
@@ -132,7 +132,7 @@
   }
 
   /**
-   * returns total task read bytes, by summing all parent & spawned threads readbytes
+   * returns total task read bytes, by summing the read bytes of the parent & spawned threads
    *
    * @param threadName
    * @return
@@ -149,7 +149,7 @@
   }
 
   /**
-   * adds spawaned thread callback entry in metricmap using parentThreadId
+   * adds the spawned thread callback entry in the metric map using parentThreadId
    *
    * @param parentThreadId
    * @param callback
@@ -177,16 +177,16 @@
 
     CarbonFSBytesReadOnThreadCallback(long parentThread) {
       // reads current thread readBytes
-      this.baseline = readbytes();
+      this.baseline = readBytes();
       addEntry(parentThread, this);
     }
 
     /**
-     * returns current thread readbytes from FileSystem Statistics
+     * returns current thread read bytes from FileSystem Statistics
      *
      * @return
      */
-    public long readbytes() {
+    public long readBytes() {
       List<FileSystem.Statistics> statisticsList = FileSystem.getAllStatistics();
       long sum = 0;
       try {
diff --git a/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java b/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java
index 6914bcf..45f16ca 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java
@@ -59,8 +59,8 @@
   private static final String STREAMING_CHECKPOINT_DIR = "checkpoint";
   private static final String STAGE_DIR = "stage";
   private static final String STAGE_DATA_DIR = "stage_data";
-  public static final String  SUCCESS_FILE_SUBFIX = ".success";
-  public static final String  LOADING_FILE_SUBFIX = ".loading";
+  public static final String SUCCESS_FILE_SUFFIX = ".success";
+  public static final String LOADING_FILE_SUFFIX = ".loading";
   private static final String SNAPSHOT_FILE_NAME = "snapshot";
 
   public static final String SYSTEM_FOLDER_DIR = "_system";
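Reviewer note: on the corrected constants, marker files are built by plain suffix concatenation on the stage file name. A tiny illustration (file names hypothetical):

```java
public final class StageMarkerSketch {
  static final String SUCCESS_FILE_SUFFIX = ".success";
  static final String LOADING_FILE_SUFFIX = ".loading";

  public static void main(String[] args) {
    String stageFile = "stage/part-0001";                // hypothetical stage file
    String successTag = stageFile + SUCCESS_FILE_SUFFIX; // stage/part-0001.success
    String loadingTag = stageFile + LOADING_FILE_SUFFIX; // stage/part-0001.loading
    System.out.println(successTag + " / " + loadingTag);
  }
}
```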
@@ -273,12 +273,12 @@
   }
 
   private static String getCarbonIndexFileName(String taskNo, int bucketNumber,
-      String factUpdatedtimeStamp, String segmentNo) {
+      String factUpdatedTimestamp, String segmentNo) {
     if (bucketNumber == -1) {
       return new StringBuilder()
           .append(taskNo).append(DASH)
           .append(segmentNo).append(DASH)
-          .append(factUpdatedtimeStamp)
+          .append(factUpdatedTimestamp)
           .append(INDEX_FILE_EXT)
           .toString();
     } else {
@@ -286,7 +286,7 @@
           .append(taskNo).append(DASH)
           .append(bucketNumber).append(DASH)
           .append(segmentNo).append(DASH)
-          .append(factUpdatedtimeStamp)
+          .append(factUpdatedTimestamp)
           .append(INDEX_FILE_EXT)
           .toString();
     }
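Reviewer note: with the corrected parameter name, the two branches produce `taskNo-segmentNo-timestamp` and `taskNo-bucket-segmentNo-timestamp` index file names. A condensed sketch of the same logic (the `.carbonindex` value for INDEX_FILE_EXT is assumed):

```java
public final class IndexFileNameSketch {
  private static final String DASH = "-";
  private static final String INDEX_FILE_EXT = ".carbonindex"; // assumed value

  static String indexFileName(String taskNo, int bucketNumber,
      String factUpdatedTimestamp, String segmentNo) {
    StringBuilder name = new StringBuilder().append(taskNo).append(DASH);
    if (bucketNumber != -1) {
      name.append(bucketNumber).append(DASH); // bucket id only when bucketing is used
    }
    return name.append(segmentNo).append(DASH)
        .append(factUpdatedTimestamp).append(INDEX_FILE_EXT).toString();
  }

  public static void main(String[] args) {
    System.out.println(indexFileName("0", -1, "1596112856000", "2"));
    // 0-2-1596112856000.carbonindex
    System.out.println(indexFileName("0", 3, "1596112856000", "2"));
    // 0-3-2-1596112856000.carbonindex
  }
}
```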
@@ -398,7 +398,7 @@
   /**
    * Return store path for index based on the indexName,
    *
-   * @return store path based on indexname
+   * @return store path based on index name
    */
   public static String getIndexesStorePath(String tablePath, String segmentId,
       String indexName) {
@@ -564,15 +564,15 @@
     }
 
     /**
-     * gets segement id from given absolute data file path
+     * gets segment id from given absolute data file path
      */
     public static String getSegmentIdFromPath(String dataFileAbsolutePath) {
       // find segment id from last of data file path
-      String tempdataFileAbsolutePath = dataFileAbsolutePath.replace(
+      String tempDataFileAbsolutePath = dataFileAbsolutePath.replace(
           CarbonCommonConstants.WINDOWS_FILE_SEPARATOR, CarbonCommonConstants.FILE_SEPARATOR);
-      int endIndex = tempdataFileAbsolutePath.lastIndexOf(CarbonCommonConstants.FILE_SEPARATOR);
+      int endIndex = tempDataFileAbsolutePath.lastIndexOf(CarbonCommonConstants.FILE_SEPARATOR);
       // + 1 for size of "/"
-      int startIndex = tempdataFileAbsolutePath.lastIndexOf(
+      int startIndex = tempDataFileAbsolutePath.lastIndexOf(
           CarbonCommonConstants.FILE_SEPARATOR, endIndex - 1) + 1;
       String segmentDirStr = dataFileAbsolutePath.substring(startIndex, endIndex);
       //identify id in segment_<id>
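Reviewer note: a worked example of the parsing above, where the segment directory is the path component just before the file name and the id follows the underscore. The final underscore split here is a plausible reading of the `segment_<id>` comment, sketched on an invented path:

```java
public final class SegmentIdSketch {
  static String getSegmentIdFromPath(String dataFileAbsolutePath) {
    String normalized = dataFileAbsolutePath.replace("\\", "/");
    int endIndex = normalized.lastIndexOf('/');
    int startIndex = normalized.lastIndexOf('/', endIndex - 1) + 1;
    String segmentDir = normalized.substring(startIndex, endIndex); // e.g. Segment_2
    return segmentDir.substring(segmentDir.indexOf('_') + 1);       // e.g. 2
  }

  public static void main(String[] args) {
    System.out.println(getSegmentIdFromPath(
        "/store/db/t1/Fact/Part0/Segment_2/part-0-0-1596112856000.carbondata")); // 2
  }
}
```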
diff --git a/core/src/main/java/org/apache/carbondata/core/view/MVCatalog.java b/core/src/main/java/org/apache/carbondata/core/view/MVCatalog.java
index 0cecee9..861ec2e 100644
--- a/core/src/main/java/org/apache/carbondata/core/view/MVCatalog.java
+++ b/core/src/main/java/org/apache/carbondata/core/view/MVCatalog.java
@@ -20,7 +20,7 @@
 import org.apache.carbondata.core.metadata.schema.table.RelationIdentifier;
 
 /**
- * This is the interface for inmemory catalog registry for mv.
+ * This is the interface for in-memory catalog registry for mv.
  * For query rewrite.
  * @since 1.4.0
  */
diff --git a/core/src/main/java/org/apache/carbondata/core/view/MVManager.java b/core/src/main/java/org/apache/carbondata/core/view/MVManager.java
index 37658df..e2a8d9f 100644
--- a/core/src/main/java/org/apache/carbondata/core/view/MVManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/view/MVManager.java
@@ -296,7 +296,7 @@
       try {
         if (carbonLock.lockWithRetries()) {
           LOGGER.info("Acquired lock for table" + relationIdentifier.getDatabaseName() + "."
-              + relationIdentifier.getTableName() + " for table status updation");
+              + relationIdentifier.getTableName() + " for table status update");
           String metaDataPath =
               CarbonTablePath.getMetadataPath(relationIdentifier.getTablePath());
           LoadMetadataDetails[] loadMetadataDetails =
@@ -308,20 +308,20 @@
               CarbonTablePath.getTableStatusFilePath(relationIdentifier.getTablePath()),
               loadMetadataDetails);
         } else {
-          LOGGER.error("Not able to acquire the lock for Table status updation for table "
+          LOGGER.error("Not able to acquire the lock for Table status update for table "
               + relationIdentifier.getDatabaseName() + "." + relationIdentifier
               .getTableName());
         }
       } finally {
         if (carbonLock.unlock()) {
           LOGGER.info(
-              "Table unlocked successfully after table status updation" + relationIdentifier
+              "Table unlocked successfully after table status update" + relationIdentifier
                   .getDatabaseName() + "." + relationIdentifier.getTableName());
         } else {
           LOGGER.error(
               "Unable to unlock Table lock for table" + relationIdentifier.getDatabaseName()
                   + "." + relationIdentifier.getTableName()
-                  + " during table status updation");
+                  + " during table status update");
         }
       }
     }
diff --git a/core/src/main/java/org/apache/carbondata/core/view/MVSchema.java b/core/src/main/java/org/apache/carbondata/core/view/MVSchema.java
index 531c0c0..3887c97 100644
--- a/core/src/main/java/org/apache/carbondata/core/view/MVSchema.java
+++ b/core/src/main/java/org/apache/carbondata/core/view/MVSchema.java
@@ -40,7 +40,7 @@
 import com.google.gson.Gson;
 
 /**
- * It is the new schama of mv and it has less fields compare to {{@link MVSchema}}
+ * It is the new schema of mv and it has fewer fields compared to {{@link MVSchema}}
  */
 public class MVSchema implements Serializable, Writable {
 
@@ -77,7 +77,7 @@
   private Map<Integer, String> columnsOrderMap;
 
   /**
-   * timeseries query
+   * time series query
    */
   private boolean timeSeries;
 
diff --git a/core/src/main/java/org/apache/carbondata/core/writer/CarbonIndexFileMergeWriter.java b/core/src/main/java/org/apache/carbondata/core/writer/CarbonIndexFileMergeWriter.java
index c93bb1d..40c346d 100644
--- a/core/src/main/java/org/apache/carbondata/core/writer/CarbonIndexFileMergeWriter.java
+++ b/core/src/main/java/org/apache/carbondata/core/writer/CarbonIndexFileMergeWriter.java
@@ -66,10 +66,10 @@
   }
 
   /**
-   * Merge all the carbonindex files of segment to a  merged file
+   * Merge all the carbon index files of a segment into a merged file
    * @param tablePath
-   * @param indexFileNamesTobeAdded while merging it comsiders only these files.
-   *                                If null then consider all
+   * @param indexFileNamesTobeAdded while merging, it considers only these files.
+   *                                If null, then consider all
    * @param readFileFooterFromCarbonDataFile flag to read file footer information from carbondata
    *                                         file. This will used in case of upgrade from version
    *                                         which do not store the blocklet info to current version
@@ -190,7 +190,7 @@
       // this case will be used in case of upgrade where old store will not have the blocklet
       // info in the index file and therefore blocklet info need to be read from the file footer
       // in the carbondata file
-      fileStore.readAllIndexAndFillBolckletInfo(segmentPath);
+      fileStore.readAllIndexAndFillBlockletInfo(segmentPath);
     } else {
       fileStore.readAllIIndexOfSegment(segmentPath);
     }
@@ -232,16 +232,16 @@
     for (Map.Entry<String, Map<String, byte[]>> entry : indexLocationMap.entrySet()) {
       String mergeIndexFile =
           writeMergeIndexFile(indexFileNamesTobeAdded, entry.getKey(), entry.getValue(), segmentId);
-      for (Map.Entry<String, SegmentFileStore.FolderDetails> segentry : segmentFileStore
+      for (Map.Entry<String, SegmentFileStore.FolderDetails> segment : segmentFileStore
           .getLocationMap().entrySet()) {
-        String location = segentry.getKey();
-        if (segentry.getValue().isRelative()) {
+        String location = segment.getKey();
+        if (segment.getValue().isRelative()) {
           location =
               segmentFileStore.getTablePath() + CarbonCommonConstants.FILE_SEPARATOR + location;
         }
         if (FileFactory.getCarbonFile(entry.getKey()).equals(FileFactory.getCarbonFile(location))) {
-          segentry.getValue().setMergeFileName(mergeIndexFile);
-          segentry.getValue().setFiles(new HashSet<String>());
+          segment.getValue().setMergeFileName(mergeIndexFile);
+          segment.getValue().setFiles(new HashSet<String>());
           break;
         }
       }
@@ -297,7 +297,7 @@
   }
 
   /**
-   * Merge all the carbonindex files of segment to a  merged file
+   * Merge all the carbon index files of a segment into a merged file
    *
    * @param segmentId
    */
@@ -307,7 +307,7 @@
   }
 
   /**
-   * Merge all the carbonindex files of segment to a  merged file
+   * Merge all the carbon index files of a segment into a merged file
    *
    * @param segmentId
    * @param readFileFooterFromCarbonDataFile
diff --git a/core/src/main/java/org/apache/carbondata/hadoop/CarbonInputSplit.java b/core/src/main/java/org/apache/carbondata/hadoop/CarbonInputSplit.java
index 6ecb8c5..3809609 100644
--- a/core/src/main/java/org/apache/carbondata/hadoop/CarbonInputSplit.java
+++ b/core/src/main/java/org/apache/carbondata/hadoop/CarbonInputSplit.java
@@ -88,7 +88,7 @@
 
   private String indexWritePath;
   /**
-   * validBlockletIds will contain the valid blocklted ids for a given block that contains the data
+   * validBlockletIds will contain the valid blocklet ids for a given block that contains the data
    * after pruning from driver. These will be used in executor for further pruning of blocklets
    */
   private Set<Integer> validBlockletIds;
@@ -117,8 +117,8 @@
 
   /**
    * used in case of index server, all the fields which is required
-   * only in case in executor not need to deseralize and will be kept as
-   * byte array and duing write method directly it will be written to output stream
+   * only in the executor need not be deserialized and will be kept as a
+   * byte array; during the write method it will be written directly to the output stream
    */
   private byte[] serializeData;
 
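Reviewer note: the corrected comment describes a lazy-deserialization idiom in which executor-only fields stay as raw bytes until first accessed (see deserializeField() later in this diff). A generic sketch of the pattern, not the actual split layout:

```java
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.UncheckedIOException;

public final class LazyFieldSketch {
  private byte[] serializeData; // executor-only fields kept as raw bytes
  private String segmentNo;     // decoded from serializeData on first access

  LazyFieldSketch(byte[] serializeData) {
    this.serializeData = serializeData;
  }

  private void deserializeField() {
    if (serializeData != null) {
      try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(serializeData))) {
        segmentNo = in.readUTF(); // decode only what the executor asks for
        serializeData = null;     // decoded once; drop the raw bytes in this sketch
      } catch (IOException e) {
        throw new UncheckedIOException(e);
      }
    }
  }

  String getSegmentNo() {
    deserializeField();
    return segmentNo;
  }
}
```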
@@ -168,8 +168,8 @@
       String blockletId) throws IOException {
     this.filePath = filePath;
     this.blockletId = blockletId;
-    // getting the underline stream to get the actual position of the fileds which won't be
-    // deseralize as its used by executor
+    // getting the underline stream to get the actual position of the fields which won't be
+    // deserialized here, as they are used only by the executor
     ExtendedByteArrayInputStream underlineStream =
         ((ExtendedDataInputStream) in).getUnderlineStream();
     // current position
@@ -187,7 +187,7 @@
     this.start = in.readLong();
     this.length = in.readLong();
     this.version = ColumnarFormatVersion.valueOf(in.readShort());
-    // will be removed after count(*) optmization in case of index server
+    // will be removed after count(*) optimization in case of index server
     this.rowCount = in.readInt();
     if (in.readBoolean()) {
       int numberOfDeleteDeltaFiles = in.readInt();
@@ -196,7 +196,7 @@
         deleteDeltaFiles[i] = in.readUTF();
       }
     }
-    // after deseralizing required field get the start position of field which will be only used
+    // after deserializing the required fields, get the start position of the fields used only
     // in executor
     int leftoverPosition = underlineStream.getPosition();
     // position of next split
@@ -330,7 +330,7 @@
   }
 
   public String getSegmentId() {
-    derserializeField();
+    deserializeField();
     if (segment != null) {
       return segment.getSegmentNo();
     } else {
@@ -339,14 +339,14 @@
   }
 
   public Segment getSegment() {
-    derserializeField();
+    deserializeField();
     return segment;
   }
 
   @Override
   public void readFields(DataInput in) throws IOException {
-    // if serializeData is not null it means fields which is present below if condition are alredy
-    // deserialize  org.apache.carbondata.hadoop.CarbonInputSplit#CarbonInputSplit(
+    // if serializeData is not null, it means the fields read in the below if-condition are already
+    // deserialized in org.apache.carbondata.hadoop.CarbonInputSplit#CarbonInputSplit(
     // int, java.io.DataInput, java.lang.String, java.lang.String[], java.lang.String)
     if (null == serializeData) {
       this.filePath = in.readUTF();
@@ -403,7 +403,7 @@
     out.writeLong(start);
     out.writeLong(length);
     out.writeShort(version.number());
-    //TODO remove this code once count(*) optmization is added in case of index server
+    //TODO remove this code once count(*) optimization is added in case of index server
     if (null != indexRow) {
       out.writeInt(this.indexRow.getInt(BlockletIndexRowIndexes.ROW_COUNT_INDEX));
     } else if (null != detailInfo) {
@@ -480,11 +480,11 @@
       return -1;
     }
     CarbonInputSplit other = (CarbonInputSplit) o;
-    derserializeField();
-    other.derserializeField();
+    deserializeField();
+    other.deserializeField();
     int compareResult = 0;
     // get the segment id
-    // converr seg ID to double.
+    // convert seg ID to double.
 
     double seg1 = Double.parseDouble(segment.getSegmentNo());
     double seg2 = Double.parseDouble(other.segment.getSegmentNo());
@@ -545,7 +545,7 @@
 
   @Override
   public int hashCode() {
-    derserializeField();
+    deserializeField();
     int result = taskId.hashCode();
     result = 31 * result + segment.hashCode();
     result = 31 * result + bucketId.hashCode();
@@ -572,7 +572,7 @@
   }
 
   /**
-   * returns map of blocklocation and storage id
+   * returns map of block location and storage id
    *
    * @return
    */
@@ -652,7 +652,7 @@
     }
     out.writeShort(this.indexRow.getShort(BlockletIndexRowIndexes.VERSION_INDEX));
     out.writeShort(Short.parseShort(this.blockletId));
-    out.writeLong(this.indexRow.getLong(BlockletIndexRowIndexes.SCHEMA_UPADATED_TIME_INDEX));
+    out.writeLong(this.indexRow.getLong(BlockletIndexRowIndexes.SCHEMA_UPDATED_TIME_INDEX));
     out.writeBoolean(false);
     out.writeLong(this.indexRow.getLong(BlockletIndexRowIndexes.BLOCK_FOOTER_OFFSET));
     // write -1 if columnSchemaBinary is null so that at the time of reading it can distinguish
@@ -689,7 +689,7 @@
           .setVersionNumber(this.indexRow.getShort(BlockletIndexRowIndexes.VERSION_INDEX));
       detailInfo.setBlockletId(Short.parseShort(this.blockletId));
       detailInfo.setSchemaUpdatedTimeStamp(
-          this.indexRow.getLong(BlockletIndexRowIndexes.SCHEMA_UPADATED_TIME_INDEX));
+          this.indexRow.getLong(BlockletIndexRowIndexes.SCHEMA_UPDATED_TIME_INDEX));
       detailInfo.setBlockFooterOffset(
           this.indexRow.getLong(BlockletIndexRowIndexes.BLOCK_FOOTER_OFFSET));
       start = detailInfo.getBlockFooterOffset();
@@ -753,7 +753,7 @@
    * so footer offsets needs to be written correctly, so updating the length
    *
    */
-  public void updateFooteroffset() {
+  public void updateFooterOffset() {
     if (isBlockCache && start == 0) {
       if (null != indexRow) {
         start = this.indexRow.getLong(BlockletIndexRowIndexes.BLOCK_FOOTER_OFFSET);
@@ -839,7 +839,7 @@
    * This method will be used to deserialize fields
    * in case of index server
    */
-  private void derserializeField() {
+  private void deserializeField() {
     if (null != serializeData) {
       DataInputStream in = null;
       try {
diff --git a/core/src/test/java/org/apache/carbondata/core/indexstore/blockletindex/TestBlockletIndex.java b/core/src/test/java/org/apache/carbondata/core/indexstore/blockletindex/TestBlockletIndex.java
index 17686d9..818c824 100644
--- a/core/src/test/java/org/apache/carbondata/core/indexstore/blockletindex/TestBlockletIndex.java
+++ b/core/src/test/java/org/apache/carbondata/core/indexstore/blockletindex/TestBlockletIndex.java
@@ -25,7 +25,7 @@
 import org.apache.carbondata.core.datastore.block.SegmentPropertiesAndSchemaHolder;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonImplicitDimension;
-import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.core.scan.filter.executer.FilterExecutor;
 import org.apache.carbondata.core.scan.filter.executer.ImplicitIncludeFilterExecutorImpl;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
 import org.apache.carbondata.core.util.ByteUtil;
@@ -45,7 +45,7 @@
     dimColumnEvaluatorInfo.setColumnIndex(0);
     dimColumnEvaluatorInfo.setRowIndex(0);
     dimColumnEvaluatorInfo.setDimension(carbonImplicitDimension);
-    dimColumnEvaluatorInfo.setDimensionExistsInCurrentSilce(false);
+    dimColumnEvaluatorInfo.setDimensionExistsInCurrentSlice(false);
     implicitIncludeFilterExecutor =
         new ImplicitIncludeFilterExecutorImpl(dimColumnEvaluatorInfo);
   }
@@ -73,7 +73,7 @@
         new SegmentPropertiesAndSchemaHolder.SegmentPropertiesWrapper(new CarbonTable(),
             new ArrayList<>()));
     Method method = BlockIndex.class
-        .getDeclaredMethod("addBlockBasedOnMinMaxValue", FilterExecuter.class, byte[][].class,
+        .getDeclaredMethod("addBlockBasedOnMinMaxValue", FilterExecutor.class, byte[][].class,
             byte[][].class, boolean[].class, String.class, int.class);
     method.setAccessible(true);
 
diff --git a/core/src/test/java/org/apache/carbondata/core/indexstore/blockletindex/TestBlockletIndexFactory.java b/core/src/test/java/org/apache/carbondata/core/indexstore/blockletindex/TestBlockletIndexFactory.java
index 2186a05..70ab488 100644
--- a/core/src/test/java/org/apache/carbondata/core/indexstore/blockletindex/TestBlockletIndexFactory.java
+++ b/core/src/test/java/org/apache/carbondata/core/indexstore/blockletindex/TestBlockletIndexFactory.java
@@ -35,7 +35,6 @@
 import org.apache.carbondata.core.indexstore.BlockletIndexWrapper;
 import org.apache.carbondata.core.indexstore.TableBlockIndexUniqueIdentifier;
 import org.apache.carbondata.core.indexstore.TableBlockIndexUniqueIdentifierWrapper;
-import org.apache.carbondata.core.memory.MemoryException;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 import org.apache.carbondata.core.metadata.schema.table.IndexSchema;
@@ -140,7 +139,7 @@
       }
     };
     List<IndexInputSplit> validDistributables =
-        blockletIndexFactory.getAllUncachedDistributables(indexInputSplits);
+        blockletIndexFactory.getAllUncached(indexInputSplits);
     assert 1 == validDistributables.size();
   }
 }
\ No newline at end of file
diff --git a/core/src/test/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecuterImplTest.java b/core/src/test/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecutorImplTest.java
similarity index 96%
rename from core/src/test/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecuterImplTest.java
rename to core/src/test/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecutorImplTest.java
index 2bb8415..b305db9 100644
--- a/core/src/test/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecuterImplTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecutorImplTest.java
@@ -22,7 +22,7 @@
 import org.apache.carbondata.core.datastore.chunk.DimensionColumnPage;
 import org.apache.carbondata.core.util.CarbonUtil;
 
-public class ExcludeFilterExecuterImplTest extends IncludeFilterExecuterImplTest {
+public class ExcludeFilterExecutorImplTest extends IncludeFilterExecutorImplTest {
 
  @Override
   public BitSet setFilterdIndexToBitSetNew(DimensionColumnPage dimColumnDataChunk,
diff --git a/core/src/test/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImplTest.java b/core/src/test/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecutorImplTest.java
similarity index 98%
rename from core/src/test/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImplTest.java
rename to core/src/test/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecutorImplTest.java
index 41083b0..58d6538 100644
--- a/core/src/test/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImplTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecutorImplTest.java
@@ -29,7 +29,7 @@
 
 import junit.framework.TestCase;
 
-public class IncludeFilterExecuterImplTest extends TestCase {
+public class IncludeFilterExecutorImplTest extends TestCase {
 
   /**
    * @throws Exception
@@ -165,7 +165,7 @@
     System.out.println("dimColumnSize: " + dimColumnSize);
     
     FixedLengthDimensionColumnPage dimensionColumnDataChunk;
-    DimColumnExecuterFilterInfo dim = new DimColumnExecuterFilterInfo();
+    DimColumnExecutorFilterInfo dim = new DimColumnExecutorFilterInfo();
 
     byte[] dataChunk = new byte[dataChunkSize * dimColumnSize];
     for (int i = 0; i < dataChunkSize; i++) {
@@ -283,7 +283,7 @@
     // column dictionary size
     int dimColumnSize = 2;
     FixedLengthDimensionColumnPage dimensionColumnDataChunk;
-    DimColumnExecuterFilterInfo dim = new DimColumnExecuterFilterInfo();
+    DimColumnExecutorFilterInfo dim = new DimColumnExecutorFilterInfo();
 
     byte[] dataChunk = new byte[dataChunkSize * dimColumnSize];
     for (int i = 0; i < dataChunkSize; i++) {
diff --git a/core/src/test/java/org/apache/carbondata/core/util/RangeFilterProcessorTest.java b/core/src/test/java/org/apache/carbondata/core/util/RangeFilterProcessorTest.java
index 2b1704e..9f3d50a 100644
--- a/core/src/test/java/org/apache/carbondata/core/util/RangeFilterProcessorTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/util/RangeFilterProcessorTest.java
@@ -34,9 +34,9 @@
 import org.apache.carbondata.core.scan.expression.logical.OrExpression;
 import org.apache.carbondata.core.scan.expression.logical.RangeExpression;
 import org.apache.carbondata.core.scan.expression.logical.TrueExpression;
-import org.apache.carbondata.core.scan.filter.executer.RangeValueFilterExecuterImpl;
+import org.apache.carbondata.core.scan.filter.executer.RangeValueFilterExecutorImpl;
 import org.apache.carbondata.core.scan.filter.intf.FilterOptimizer;
-import org.apache.carbondata.core.scan.filter.optimizer.RangeFilterOptmizer;
+import org.apache.carbondata.core.scan.filter.optimizer.RangeFilterOptimizer;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
 
 import mockit.Deencapsulation;
@@ -110,7 +110,7 @@
         new LessThanEqualToExpression(new ColumnExpression("a", DataTypes.STRING),
             new LiteralExpression("20", DataTypes.STRING))), new TrueExpression(null));
     FilterOptimizer rangeFilterOptimizer =
-        new RangeFilterOptmizer(inputFilter);
+        new RangeFilterOptimizer(inputFilter);
     rangeFilterOptimizer.optimizeFilter();
     result = checkBothTrees(inputFilter, output);
     Assert.assertTrue(result);
@@ -151,7 +151,7 @@
         new LessThanEqualToExpression(new ColumnExpression("a", DataTypes.STRING),
             new LiteralExpression("05", DataTypes.STRING)));
     FilterOptimizer rangeFilterOptimizer =
-        new RangeFilterOptmizer(inputFilter);
+        new RangeFilterOptimizer(inputFilter);
     rangeFilterOptimizer.optimizeFilter();
     result = checkBothTrees(inputFilter, output);
     // no change
@@ -226,7 +226,7 @@
     Expression Andb3 = new AndExpression(Andb2, new TrueExpression(null));
 
     FilterOptimizer rangeFilterOptimizer =
-        new RangeFilterOptmizer(inputFilter);
+        new RangeFilterOptimizer(inputFilter);
     rangeFilterOptimizer.optimizeFilter();
     result = checkBothTrees(inputFilter, new AndExpression(Andb3, new TrueExpression(null)));
     // no change
@@ -310,7 +310,7 @@
     Expression Orb3 = new OrExpression(Orb2, lessThanb2);
 
     FilterOptimizer rangeFilterOptimizer =
-        new RangeFilterOptmizer(inputFilter);
+        new RangeFilterOptimizer(inputFilter);
     rangeFilterOptimizer.optimizeFilter();
     result = checkBothTrees(inputFilter, new OrExpression(Orb3, lessThanb1));
     // no change
@@ -324,7 +324,7 @@
 
     byte[][] filterMinMax = { { (byte) 10 }, { (byte) 20 } };
 
-    RangeValueFilterExecuterImpl range = new MockUp<RangeValueFilterExecuterImpl>() {
+    RangeValueFilterExecutorImpl range = new MockUp<RangeValueFilterExecutorImpl>() {
     }.getMockInstance();
     Deencapsulation.setField(range, "isDimensionPresentInCurrentBlock", true);
     Deencapsulation.setField(range, "lessThanExp", true);
@@ -342,7 +342,7 @@
 
     byte[][] filterMinMax = { { (byte) 10 }, { (byte) 20 } };
 
-    RangeValueFilterExecuterImpl range = new MockUp<RangeValueFilterExecuterImpl>() {
+    RangeValueFilterExecutorImpl range = new MockUp<RangeValueFilterExecutorImpl>() {
     }.getMockInstance();
     Deencapsulation.setField(range, "isDimensionPresentInCurrentBlock", true);
     Deencapsulation.setField(range, "lessThanExp", true);
@@ -359,7 +359,7 @@
 
     byte[][] filterMinMax = { { (byte) 10 }, { (byte) 20 } };
 
-    RangeValueFilterExecuterImpl range = new MockUp<RangeValueFilterExecuterImpl>() {
+    RangeValueFilterExecutorImpl range = new MockUp<RangeValueFilterExecutorImpl>() {
     }.getMockInstance();
     Deencapsulation.setField(range, "isDimensionPresentInCurrentBlock", true);
     Deencapsulation.setField(range, "lessThanExp", true);
@@ -377,7 +377,7 @@
 
     byte[][] filterMinMax = { { (byte) 10 }, { (byte) 20 } };
 
-    RangeValueFilterExecuterImpl range = new MockUp<RangeValueFilterExecuterImpl>() {
+    RangeValueFilterExecutorImpl range = new MockUp<RangeValueFilterExecutorImpl>() {
     }.getMockInstance();
     Deencapsulation.setField(range, "isDimensionPresentInCurrentBlock", true);
     Deencapsulation.setField(range, "lessThanExp", true);
@@ -398,7 +398,7 @@
 
     byte[][] filterMinMax = { { (byte) 10 }, { (byte) 20 } };
 
-    RangeValueFilterExecuterImpl range = new MockUp<RangeValueFilterExecuterImpl>() {
+    RangeValueFilterExecutorImpl range = new MockUp<RangeValueFilterExecutorImpl>() {
     }.getMockInstance();
     Deencapsulation.setField(range, "isDimensionPresentInCurrentBlock", true);
     Deencapsulation.setField(range, "lessThanExp", true);
@@ -419,7 +419,7 @@
 
     byte[][] filterMinMax = { { (byte) 15 }, { (byte) 20 } };
 
-    RangeValueFilterExecuterImpl range = new MockUp<RangeValueFilterExecuterImpl>() {
+    RangeValueFilterExecutorImpl range = new MockUp<RangeValueFilterExecutorImpl>() {
     }.getMockInstance();
     Deencapsulation.setField(range, "isDimensionPresentInCurrentBlock", true);
     Deencapsulation.setField(range, "lessThanExp", true);
diff --git a/geo/src/main/java/org/apache/carbondata/geo/scan/expression/PolygonExpression.java b/geo/src/main/java/org/apache/carbondata/geo/scan/expression/PolygonExpression.java
index 953ba48..78457d1 100644
--- a/geo/src/main/java/org/apache/carbondata/geo/scan/expression/PolygonExpression.java
+++ b/geo/src/main/java/org/apache/carbondata/geo/scan/expression/PolygonExpression.java
@@ -32,7 +32,7 @@
 import org.apache.carbondata.core.scan.expression.ExpressionResult;
 import org.apache.carbondata.core.scan.expression.UnknownExpression;
 import org.apache.carbondata.core.scan.expression.conditional.ConditionalExpression;
-import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.core.scan.filter.executer.FilterExecutor;
 import org.apache.carbondata.core.scan.filter.intf.ExpressionType;
 import org.apache.carbondata.core.scan.filter.intf.RowIntf;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
@@ -180,7 +180,7 @@
   }
 
   @Override
-  public FilterExecuter getFilterExecuter(FilterResolverIntf resolver,
+  public FilterExecutor getFilterExecutor(FilterResolverIntf resolver,
       SegmentProperties segmentProperties) {
     assert (resolver instanceof RowLevelFilterResolverImpl);
     RowLevelFilterResolverImpl rowLevelResolver = (RowLevelFilterResolverImpl) resolver;
diff --git a/geo/src/main/java/org/apache/carbondata/geo/scan/filter/executor/PolygonFilterExecutorImpl.java b/geo/src/main/java/org/apache/carbondata/geo/scan/filter/executor/PolygonFilterExecutorImpl.java
index 094dbe8..4253c3f 100644
--- a/geo/src/main/java/org/apache/carbondata/geo/scan/filter/executor/PolygonFilterExecutorImpl.java
+++ b/geo/src/main/java/org/apache/carbondata/geo/scan/filter/executor/PolygonFilterExecutorImpl.java
@@ -26,7 +26,7 @@
 import org.apache.carbondata.core.metadata.datatype.DataTypes;
 import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.filter.GenericQueryType;
-import org.apache.carbondata.core.scan.filter.executer.RowLevelFilterExecuterImpl;
+import org.apache.carbondata.core.scan.filter.executer.RowLevelFilterExecutorImpl;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
 import org.apache.carbondata.core.util.DataTypeUtil;
@@ -35,7 +35,7 @@
 /**
  * Polygon filter executor. Prunes Blocks and Blocklets based on the selected ranges of polygon.
  */
-public class PolygonFilterExecutorImpl extends RowLevelFilterExecuterImpl {
+public class PolygonFilterExecutorImpl extends RowLevelFilterExecutorImpl {
   public PolygonFilterExecutorImpl(List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
       List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList, Expression exp,
       AbsoluteTableIdentifier tableIdentifier, SegmentProperties segmentProperties,
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/stream/StreamRecordReader.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/stream/StreamRecordReader.java
index 14637ee..5b284e9 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/stream/StreamRecordReader.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/stream/StreamRecordReader.java
@@ -42,7 +42,7 @@
 import org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedException;
 import org.apache.carbondata.core.scan.filter.FilterUtil;
 import org.apache.carbondata.core.scan.filter.GenericQueryType;
-import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.core.scan.filter.executer.FilterExecutor;
 import org.apache.carbondata.core.scan.filter.intf.RowImpl;
 import org.apache.carbondata.core.scan.filter.intf.RowIntf;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
@@ -98,7 +98,7 @@
   protected boolean isFinished = false;
 
   // filter
-  protected FilterExecuter filter;
+  protected FilterExecutor filter;
   private boolean[] isFilterRequired;
   private Object[] filterValues;
   protected RowIntf filterRow;
@@ -219,7 +219,7 @@
     Map<Integer, GenericQueryType> complexDimensionInfoMap = new HashMap<>();
 
     FilterResolverIntf resolverIntf = model.getIndexFilter().getResolver();
-    filter = FilterUtil.getFilterExecuterTree(
+    filter = FilterUtil.getFilterExecutorTree(
         resolverIntf, segmentProperties, complexDimensionInfoMap, true);
     // for row filter, we need update column index
     FilterUtil.updateIndexOfColumnExpression(resolverIntf.getFilterExpression(),
diff --git a/index/bloom/src/main/java/org/apache/carbondata/index/bloom/BloomCoarseGrainIndex.java b/index/bloom/src/main/java/org/apache/carbondata/index/bloom/BloomCoarseGrainIndex.java
index 40fdcaa..1d67ec5 100644
--- a/index/bloom/src/main/java/org/apache/carbondata/index/bloom/BloomCoarseGrainIndex.java
+++ b/index/bloom/src/main/java/org/apache/carbondata/index/bloom/BloomCoarseGrainIndex.java
@@ -46,7 +46,7 @@
 import org.apache.carbondata.core.scan.expression.conditional.ListExpression;
 import org.apache.carbondata.core.scan.expression.exception.FilterIllegalMemberException;
 import org.apache.carbondata.core.scan.expression.logical.AndExpression;
-import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.core.scan.filter.executer.FilterExecutor;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.core.util.CarbonUtil;
@@ -132,7 +132,7 @@
 
   @Override
   public List<Blocklet> prune(FilterResolverIntf filterExp, SegmentProperties segmentProperties,
-      FilterExecuter filterExecuter, CarbonTable carbonTable) {
+      FilterExecutor filterExecutor, CarbonTable carbonTable) {
     Set<Blocklet> hitBlocklets = null;
     if (filterExp == null) {
       // null is different from empty here. Empty means after pruning, no blocklet need to scan.
diff --git a/index/lucene/src/main/java/org/apache/carbondata/index/lucene/LuceneFineGrainIndex.java b/index/lucene/src/main/java/org/apache/carbondata/index/lucene/LuceneFineGrainIndex.java
index 3351c16..e1e4171 100644
--- a/index/lucene/src/main/java/org/apache/carbondata/index/lucene/LuceneFineGrainIndex.java
+++ b/index/lucene/src/main/java/org/apache/carbondata/index/lucene/LuceneFineGrainIndex.java
@@ -36,7 +36,7 @@
 import org.apache.carbondata.core.metadata.schema.table.IndexSchema;
 import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.expression.MatchExpression;
-import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.core.scan.filter.executer.FilterExecutor;
 import org.apache.carbondata.core.scan.filter.intf.ExpressionType;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 
@@ -202,7 +202,7 @@
    */
   @Override
   public List<FineGrainBlocklet> prune(FilterResolverIntf filterExp,
-      SegmentProperties segmentProperties, FilterExecuter filterExecuter,
+      SegmentProperties segmentProperties, FilterExecutor filterExecutor,
       CarbonTable carbonTable) throws IOException {
 
     // convert filter expr into lucene list query
diff --git a/integration/flink/src/main/java/org/apache/carbon/core/metadata/StageManager.java b/integration/flink/src/main/java/org/apache/carbon/core/metadata/StageManager.java
index 8b6c2d7..5abd0ed 100644
--- a/integration/flink/src/main/java/org/apache/carbon/core/metadata/StageManager.java
+++ b/integration/flink/src/main/java/org/apache/carbon/core/metadata/StageManager.java
@@ -67,7 +67,7 @@
     }
 
     try {
-      writeSuccessFile(stageInputPath + CarbonTablePath.SUCCESS_FILE_SUBFIX);
+      writeSuccessFile(stageInputPath + CarbonTablePath.SUCCESS_FILE_SUFFIX);
     } catch (Throwable exception) {
       try {
         CarbonUtil.deleteFoldersAndFiles(FileFactory.getCarbonFile(stageInputPath));
diff --git a/integration/flink/src/test/scala/org/apache/carbon/flink/TestCarbonPartitionWriter.scala b/integration/flink/src/test/scala/org/apache/carbon/flink/TestCarbonPartitionWriter.scala
index 5e82b96..5321d05 100644
--- a/integration/flink/src/test/scala/org/apache/carbon/flink/TestCarbonPartitionWriter.scala
+++ b/integration/flink/src/test/scala/org/apache/carbon/flink/TestCarbonPartitionWriter.scala
@@ -426,7 +426,7 @@
     assert(unloadedFiles.length > 0)
     val loadingFilesCountBefore = loadingFiles.length
     FileFactory.getCarbonFile(unloadedFiles(0).getAbsolutePath +
-      CarbonTablePath.LOADING_FILE_SUBFIX).createNewFile()
+      CarbonTablePath.LOADING_FILE_SUFFIX).createNewFile()
     loadingFiles = CarbonStore.listStageFiles(stagePath)._2
     val loadingFilesCountAfter = loadingFiles.length
     assert(loadingFilesCountAfter == loadingFilesCountBefore + 1)
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/api/CarbonStore.scala b/integration/spark/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
index d970e00..7de86e9 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
@@ -131,21 +131,21 @@
       //        which exclude the success files and loading files
       // Second,  only collect the stage files having success tag.
       val stageFiles = allFiles.filterNot { file =>
-        file.getName.endsWith(CarbonTablePath.SUCCESS_FILE_SUBFIX)
+        file.getName.endsWith(CarbonTablePath.SUCCESS_FILE_SUFFIX)
       }.filterNot { file =>
-        file.getName.endsWith(CarbonTablePath.LOADING_FILE_SUBFIX)
+        file.getName.endsWith(CarbonTablePath.LOADING_FILE_SUFFIX)
       }.filter { file =>
-        allFiles.contains(file.getName + CarbonTablePath.SUCCESS_FILE_SUBFIX)
+        allFiles.contains(file.getName + CarbonTablePath.SUCCESS_FILE_SUFFIX)
       }.sortWith {
         (file1, file2) => file1.getLastModifiedTime > file2.getLastModifiedTime
       }
       // 3. Get the unloaded stage files, which haven't loading tag.
       val unloadedFiles = stageFiles.filterNot { file =>
-        allFiles.contains(file.getName + CarbonTablePath.LOADING_FILE_SUBFIX)
+        allFiles.contains(file.getName + CarbonTablePath.LOADING_FILE_SUFFIX)
       }
       // 4. Get the loading stage files, which have loading tag.
       val loadingFiles = stageFiles.filter { file =>
-        allFiles.contains(file.getName + CarbonTablePath.LOADING_FILE_SUBFIX)
+        allFiles.contains(file.getName + CarbonTablePath.LOADING_FILE_SUFFIX)
       }
       (unloadedFiles, loadingFiles)
     } else {
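Reviewer note: the numbered comments in this hunk classify stage files purely by the presence of the corrected suffix markers. The same rules in a compact Java sketch (the file listing is hypothetical):

```java
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public final class StageFileClassifySketch {
  public static void main(String[] args) {
    List<String> allFiles = Arrays.asList(
        "a", "a.success", "b", "b.success", "b.loading", "c");

    // stage files: not a marker themselves, and having a success tag
    List<String> stageFiles = allFiles.stream()
        .filter(f -> !f.endsWith(".success") && !f.endsWith(".loading"))
        .filter(f -> allFiles.contains(f + ".success"))
        .collect(Collectors.toList());

    // unloaded: no loading tag yet; loading: loading tag present
    List<String> unloaded = stageFiles.stream()
        .filter(f -> !allFiles.contains(f + ".loading")).collect(Collectors.toList());
    List<String> loading = stageFiles.stream()
        .filter(f -> allFiles.contains(f + ".loading")).collect(Collectors.toList());

    System.out.println(unloaded); // [a]
    System.out.println(loading);  // [b]
  }
}
```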
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/indexserver/IndexJobs.scala b/integration/spark/src/main/scala/org/apache/carbondata/indexserver/IndexJobs.scala
index becbfb7..0d85fad 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/indexserver/IndexJobs.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/indexserver/IndexJobs.scala
@@ -63,7 +63,7 @@
           indexFormat.getCarbonTable.getAbsoluteTableIdentifier, filterProcessor)
         indexFormat.setFilterResolverIntf(filterInf)
         IndexServer.getClient.getSplits(indexFormat)
-          .getExtendedBlockets(indexFormat.getCarbonTable.getTablePath, indexFormat
+          .getExtendedBlocklets(indexFormat.getCarbonTable.getTablePath, indexFormat
             .getQueryId, indexFormat.isCountStarJob)
       } finally {
         if (null != splitFolderPath && !splitFolderPath.deleteFile()) {
@@ -100,7 +100,7 @@
     }
     if (filterInf.isInstanceOf[RowLevelFilterResolverImpl] &&
         filterInf.getFilterExpression.getFilterExpressionType == ExpressionType.UNKNOWN) {
-      return filterProcessor.changeUnknownResloverToTrue(tableIdentifer)
+      return filterProcessor.changeUnknownResolverToTrue(tableIdentifer)
     }
     filterInf
   }
@@ -121,7 +121,7 @@
     val originalJobDesc = spark.sparkContext.getLocalProperty("spark.job.description")
     indexFormat.setIsWriteToFile(false)
     indexFormat.setFallbackJob()
-    val splits = IndexServer.getSplits(indexFormat).getExtendedBlockets(indexFormat
+    val splits = IndexServer.getSplits(indexFormat).getExtendedBlocklets(indexFormat
       .getCarbonTable.getTablePath, indexFormat.getQueryId, indexFormat.isCountStarJob)
     // Fire a job to clear the cache from executors as Embedded mode does not maintain the cache.
     if (!indexFormat.isJobToClearIndexes) {
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index ad8d982..b049373 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -100,7 +100,7 @@
     val lock = CarbonLockFactory.getSystemLevelCarbonLockObj(
       configuredMdtPath + CarbonCommonConstants.FILE_SEPARATOR +
         CarbonCommonConstants.SYSTEM_LEVEL_COMPACTION_LOCK_FOLDER,
-      LockUsage.SYSTEMLEVEL_COMPACTION_LOCK)
+      LockUsage.SYSTEM_LEVEL_COMPACTION_LOCK)
 
     if (lock.lockWithRetries()) {
       LOGGER.info(s"Acquired the compaction lock for table ${ carbonLoadModel.getDatabaseName }" +
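The renamed lock usage participates in the usual guard pattern: compaction runs only while the system-level lock is held, and the lock is always released. A generic sketch — the `CompactionLock` trait is a stand-in for CarbonData's lock interface; only `lockWithRetries()` appears in the hunk, `unlock()` is assumed:

```scala
// Stand-in for the lock object returned by CarbonLockFactory.
trait CompactionLock {
  def lockWithRetries(): Boolean
  def unlock(): Boolean
}

def withCompactionLock[T](lock: CompactionLock)(body: => T): Option[T] = {
  if (lock.lockWithRetries()) {
    try Some(body)        // run the compaction only while holding the lock
    finally lock.unlock() // always release, even if the body throws
  } else {
    None // another driver already holds the system-level compaction lock
  }
}
```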
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonDeleteStageFilesCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonDeleteStageFilesCommand.scala
index 456a410..2432340 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonDeleteStageFilesCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonDeleteStageFilesCommand.scala
@@ -99,7 +99,7 @@
     val stageDirectory = FileFactory.getCarbonFile(stagePath, configuration)
     if (stageDirectory.exists()) {
       stageDirectory.listFiles().filter { file =>
-        !file.getName.endsWith(CarbonTablePath.SUCCESS_FILE_SUBFIX)
+        !file.getName.endsWith(CarbonTablePath.SUCCESS_FILE_SUFFIX)
       }
     } else {
       Seq.empty
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertFromStageCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertFromStageCommand.scala
index e8420ec..e8331f0 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertFromStageCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertFromStageCommand.scala
@@ -507,7 +507,7 @@
           // Get the loading files path
           val stageLoadingFile =
             FileFactory.getCarbonFile(files._1.getAbsolutePath +
-              CarbonTablePath.LOADING_FILE_SUBFIX);
+              CarbonTablePath.LOADING_FILE_SUFFIX);
           // Try to create loading files
          // Set isFailed to true if createNewFile returns false;
          // the reason can be that the file already exists or an exception occurred.
@@ -565,7 +565,7 @@
           // Delete three types of file: stage|.success|.loading
           val stageLoadingFile =
             FileFactory.getCarbonFile(files._1.getAbsolutePath
-              + CarbonTablePath.LOADING_FILE_SUBFIX);
+              + CarbonTablePath.LOADING_FILE_SUFFIX);
           var isFailed = false
          // If delete() returns false, the reason may be FileNotFound or FileFailedClean.
          // FileNotFound is treated as a successful clean.
@@ -657,7 +657,7 @@
     if (dir.exists()) {
       val allFiles = dir.listFiles()
       val successFiles = allFiles.filter { file =>
-        file.getName.endsWith(CarbonTablePath.SUCCESS_FILE_SUBFIX)
+        file.getName.endsWith(CarbonTablePath.SUCCESS_FILE_SUFFIX)
       }.map { file =>
         (file.getName.substring(0, file.getName.indexOf(".")), file)
       }.toMap
@@ -669,7 +669,7 @@
      // 1) stages never loaded: choose the stages without a '.loading' tag.
      // 2) stages whose load timed out: the timeout threshold depends on INSERT_STAGE_TIMEOUT
       val loadingFiles = allFiles.filter { file =>
-        file.getName.endsWith(CarbonTablePath.LOADING_FILE_SUBFIX)
+        file.getName.endsWith(CarbonTablePath.LOADING_FILE_SUFFIX)
       }.filter { file =>
         (System.currentTimeMillis() - file.getLastModifiedTime) <
           CarbonInsertFromStageCommand.INSERT_STAGE_TIMEOUT
@@ -678,9 +678,9 @@
       }.toMap
 
       val stageFiles = allFiles.filter { file =>
-        !file.getName.endsWith(CarbonTablePath.SUCCESS_FILE_SUBFIX)
+        !file.getName.endsWith(CarbonTablePath.SUCCESS_FILE_SUFFIX)
       }.filter { file =>
-        !file.getName.endsWith(CarbonTablePath.LOADING_FILE_SUBFIX)
+        !file.getName.endsWith(CarbonTablePath.LOADING_FILE_SUFFIX)
       }.filter { file =>
         successFiles.contains(file.getName)
       }.filterNot { file =>
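The comments in the last hunk describe when a stage may be (re)picked for loading: either it was never tagged `.loading`, or its tag is older than the insert-stage timeout. A small sketch of that condition — the entry model and the timeout value are illustrative assumptions:

```scala
// Hypothetical model of a stage entry: the loading tag's mtime, if present.
final case class StageEntry(name: String, loadingTagModifiedMs: Option[Long])

// Stand-in for CarbonInsertFromStageCommand.INSERT_STAGE_TIMEOUT (value assumed).
val InsertStageTimeoutMs: Long = 8L * 60 * 60 * 1000

/** True if the stage was never loaded, or its previous load timed out. */
def isRetryable(stage: StageEntry, nowMs: Long): Boolean =
  stage.loadingTagModifiedMs match {
    case None             => true // 1) never loaded: no '.loading' tag yet
    case Some(modifiedMs) => nowMs - modifiedMs >= InsertStageTimeoutMs // 2) timed out
  }
```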
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala
index 25d35ec..4147f4b 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala
@@ -177,7 +177,7 @@
         CarbonCommonConstants.LOCK_PATH_DEFAULT).toLowerCase
         .nonEmpty) {
         val tableLockPath = CarbonLockFactory
-          .getLockpath(carbonTable.getCarbonTableIdentifier.getTableId)
+          .getLockPath(carbonTable.getCarbonTableIdentifier.getTableId)
         val file = FileFactory.getCarbonFile(tableLockPath)
         CarbonUtil.deleteFoldersAndFilesSilent(file)
       }
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonExplainCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonExplainCommand.scala
index 73d0a89..f4a4ca3 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonExplainCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonExplainCommand.scala
@@ -77,10 +77,10 @@
         queryExecution.toRdd.partitions
         // For count(*) queries the explain collector will be disabled, so profiler
        // information is not required in such scenarios.
-        if (null == ExplainCollector.getFormatedOutput) {
+        if (null == ExplainCollector.getFormattedOutput) {
           Seq.empty
         }
-        Seq(Row("== CarbonData Profiler ==\n" + ExplainCollector.getFormatedOutput))
+        Seq(Row("== CarbonData Profiler ==\n" + ExplainCollector.getFormattedOutput))
       } else {
         Seq.empty
       }
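One thing the renamed getter makes visible: in the hunk above, the `Seq.empty` inside the null check is a discarded expression (there is no `else`), so the profiler row is built unconditionally. A null-safe formulation of the apparent intent — a sketch, not the committed fix:

```scala
import org.apache.spark.sql.Row

// Wraps the formatted profiler output, which is null when the explain
// collector was disabled (e.g. for count(*) queries).
def profilerRows(formattedOutput: String): Seq[Row] =
  Option(formattedOutput)
    .map(out => Seq(Row("== CarbonData Profiler ==\n" + out)))
    .getOrElse(Seq.empty)
```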
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/index/CarbonIndexUtil.scala b/integration/spark/src/main/scala/org/apache/spark/sql/index/CarbonIndexUtil.scala
index 6213caf..61c25e9 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/index/CarbonIndexUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/index/CarbonIndexUtil.scala
@@ -166,8 +166,8 @@
           indexTableDetails.setMajorCompacted(factTableDetails.isMajorCompacted)
           indexTableDetails.setMergedLoadName(factTableDetails.getMergedLoadName)
           indexTableDetails
-            .setModificationOrdeletionTimesStamp(factTableDetails
-              .getModificationOrdeletionTimesStamp)
+            .setModificationOrDeletionTimestamp(factTableDetails
+              .getModificationOrDeletionTimestamp)
           indexTableDetails.setLoadEndTime(factTableDetails.getLoadEndTime)
           indexTableDetails.setVisibility(factTableDetails.getVisibility)
           found = true
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/BlockletIndexInputFormat.java b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/BlockletIndexInputFormat.java
index 32cade2..f65715d 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/BlockletIndexInputFormat.java
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/BlockletIndexInputFormat.java
@@ -93,7 +93,7 @@
         IndexStoreManager.getInstance().getDefaultIndex(table).getIndexFactory();
     CacheableIndex factory = (CacheableIndex) indexFactory;
     List<IndexInputSplit> validDistributables =
-        factory.getAllUncachedDistributables(validSegments, indexExprWrapper);
+        factory.getAllUncached(validSegments, indexExprWrapper);
     if (!validSegments.isEmpty()) {
       this.readCommittedScope = validSegments.get(0).getReadCommittedScope();
     }
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/load/CarbonInternalLoaderUtil.java b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/load/CarbonInternalLoaderUtil.java
index 9a16c4a..ccb1e2a 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/load/CarbonInternalLoaderUtil.java
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/load/CarbonInternalLoaderUtil.java
@@ -267,7 +267,7 @@
               return false;
             }
             loadDetail.setSegmentStatus(SegmentStatus.COMPACTED);
-            loadDetail.setModificationOrdeletionTimesStamp(modificationOrDeletionTimeStamp);
+            loadDetail.setModificationOrDeletionTimestamp(modificationOrDeletionTimeStamp);
             loadDetail.setMergedLoadName(mergedLoadNumber);
           }
         }
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/index/CGIndexTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/index/CGIndexTestCase.scala
index 59ca8c1..b2d55a4 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/index/CGIndexTestCase.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/index/CGIndexTestCase.scala
@@ -45,7 +45,7 @@
 import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, IndexSchema}
 import org.apache.carbondata.core.scan.expression.Expression
 import org.apache.carbondata.core.scan.expression.conditional.EqualToExpression
-import org.apache.carbondata.core.scan.filter.executer.FilterExecuter
+import org.apache.carbondata.core.scan.filter.executer.FilterExecutor
 import org.apache.carbondata.core.scan.filter.intf.ExpressionType
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf
 import org.apache.carbondata.core.util.{ByteUtil, CarbonProperties}
@@ -202,7 +202,7 @@
   override def prune(
       filterExp: FilterResolverIntf,
       segmentProperties: SegmentProperties,
-      filterExecuter: FilterExecuter,
+      filterExecuter: FilterExecutor,
       carbonTable: CarbonTable): java.util.List[Blocklet] = {
     val buffer: ArrayBuffer[Expression] = new ArrayBuffer[Expression]()
     val expression = filterExp.getFilterExpression
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/index/FGIndexTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/index/FGIndexTestCase.scala
index 75346e1..b641e57 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/index/FGIndexTestCase.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/index/FGIndexTestCase.scala
@@ -43,7 +43,7 @@
 import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, IndexSchema}
 import org.apache.carbondata.core.scan.expression.Expression
 import org.apache.carbondata.core.scan.expression.conditional.EqualToExpression
-import org.apache.carbondata.core.scan.filter.executer.FilterExecuter
+import org.apache.carbondata.core.scan.filter.executer.FilterExecutor
 import org.apache.carbondata.core.scan.filter.intf.ExpressionType
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf
 import org.apache.carbondata.core.util.path.CarbonTablePath
@@ -197,7 +197,7 @@
   override def prune(
       filterExp: FilterResolverIntf,
       segmentProperties: SegmentProperties,
-      filterExecuter: FilterExecuter,
+      filterExecuter: FilterExecutor,
       carbonTable: CarbonTable): java.util.List[FineGrainBlocklet] = {
     val buffer: ArrayBuffer[Expression] = new ArrayBuffer[Expression]()
     val expression = filterExp.getFilterExpression
diff --git a/pom.xml b/pom.xml
index 7f85d0c..ac52197 100644
--- a/pom.xml
+++ b/pom.xml
@@ -150,7 +150,7 @@
     <!--todo:this can be enabled when presto tests need to be run-->
     <!--<spark.hadoop.hive.metastore.uris>thrift://localhost:8086</spark.hadoop.hive.metastore.uris>-->
     <suite.name>org.apache.carbondata.cluster.sdv.suite.SDVSuites</suite.name>
-    <script.exetension>.sh</script.exetension>
+    <script.extension>.sh</script.extension>
     <carbon.hive.based.metastore>false</carbon.hive.based.metastore>
   </properties>
 
@@ -724,7 +724,7 @@
     <profile>
       <id>windows</id>
       <properties>
-        <script.exetension>.bat</script.exetension>
+        <script.extension>.bat</script.extension>
       </properties>
     </profile>
     <!--    prestodb-->
diff --git a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java
index d216d2c..253a78b 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java
@@ -331,7 +331,7 @@
               return false;
             }
             loadDetail.setSegmentStatus(SegmentStatus.COMPACTED);
-            loadDetail.setModificationOrdeletionTimesStamp(modificationOrDeletionTimeStamp);
+            loadDetail.setModificationOrDeletionTimestamp(modificationOrDeletionTimeStamp);
             loadDetail.setMergedLoadName(mergedLoadNumber);
           }
         }
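Both setter renames above sit in the same compaction bookkeeping step: every merged segment is marked COMPACTED, timestamped, and pointed at the merged load. A compact sketch — `LoadDetail` is a hypothetical stand-in for CarbonData's load-metadata record:

```scala
// Hypothetical stand-in for the load-metadata record mutated in both hunks.
final case class LoadDetail(
    var segmentStatus: String = "SUCCESS",
    var modificationOrDeletionTimestamp: Long = 0L,
    var mergedLoadName: String = "")

def markCompacted(detail: LoadDetail, mergedLoadNumber: String, nowMs: Long): Unit = {
  detail.segmentStatus = "COMPACTED"             // segment superseded by the merged load
  detail.modificationOrDeletionTimestamp = nowMs // record when it was compacted
  detail.mergedLoadName = mergedLoadNumber       // point at the merged segment
}
```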