[SYSTEMDS-371] ColGroup Quantization

New ColGroupQuan class that internally changes all values to Byte.
It is compressing by using the highest value (or Math.abs(lowest))
in the matrix block, and therefore the bucket sizes are dependent
on that value.

This implementation does not exploit sparsity, but it gives an
alternative col group for columns that were previously uncompressed.

The testing associated with this new lossy representation is based
around either percentage deviation from ideal results (double precision)
or a loss threshold calculated from the worst-case rounding of values in
the input matrix. (They currently show only a few percent deviation in
average results.)
diff --git a/dev/Tasks-obsolete.txt b/dev/Tasks-obsolete.txt
index 4ef8729..7d30fdc 100644
--- a/dev/Tasks-obsolete.txt
+++ b/dev/Tasks-obsolete.txt
@@ -320,8 +320,8 @@
  * 365 Extended privacy/data exchange constraints                     OK
 
 SYSTEMDS-370 Lossy Compression Blocks
- * 371 ColGroup Quantization
- * 372 ColGroup Base Data change (from Double to ??)
+ * 371 ColGroup Quantization                                          OK (Naive Q8)
+ * 372 ColGroup Base Data change (from Double to ??)
 
 SYSTEMDS-380 Memory Footprint
  * 381 Matrix Block Memory footprint update
diff --git a/src/main/java/org/apache/sysds/runtime/compress/AbstractCompressedMatrixBlock.java b/src/main/java/org/apache/sysds/runtime/compress/AbstractCompressedMatrixBlock.java
index 913563c..2e5b6c7 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/AbstractCompressedMatrixBlock.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/AbstractCompressedMatrixBlock.java
@@ -255,13 +255,14 @@
 		if(!isCompressed() || isEmptyBlock())
 			return super.cmOperations(op);
 		ColGroup grp = _colGroups.get(0);
-		if(grp instanceof ColGroupUncompressed)
-			return ((ColGroupUncompressed) grp).getData().cmOperations(op);
 
-		ColGroupValue grpVal = (ColGroupValue) grp;
-		MatrixBlock vals = grpVal.getValuesAsBlock();
-		MatrixBlock counts = ColGroupValue.getCountsAsBlock(grpVal.getCounts(true));
-		return vals.cmOperations(op, counts);
+		MatrixBlock vals = grp.getValuesAsBlock();
+		if(grp.getIfCountsType()){
+			MatrixBlock counts = ColGroupValue.getCountsAsBlock(grp.getCounts(true));
+			return vals.cmOperations(op, counts);
+		}else{
+			return vals.cmOperations(op);
+		}
 	}
 
 	@Override
@@ -300,13 +301,12 @@
 		if(!isCompressed())
 			return super.sortOperations(right, result);
 		ColGroup grp = _colGroups.get(0);
-		if(grp instanceof ColGroupUncompressed)
-			return ((ColGroupUncompressed) grp).getData().sortOperations(right, result);
+		if(grp.getIfCountsType() != true)
+			return grp.getValuesAsBlock().sortOperations(right, result);
 
 		if(right == null) {
-			ColGroupValue grpVal = (ColGroupValue) grp;
-			MatrixBlock vals = grpVal.getValuesAsBlock();
-			int[] counts = grpVal.getCounts(true);
+			MatrixBlock vals = grp.getValuesAsBlock();
+			int[] counts = grp.getCounts(true);
 			double[] data = (vals.getDenseBlock() != null) ? vals.getDenseBlockValues() : null;
 			SortUtils.sortByValue(0, vals.getNumRows(), data, counts);
 			MatrixBlock counts2 = ColGroupValue.getCountsAsBlock(counts);
diff --git a/src/main/java/org/apache/sysds/runtime/compress/CompressedMatrixBlock.java b/src/main/java/org/apache/sysds/runtime/compress/CompressedMatrixBlock.java
index 1085afc..22a810c 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/CompressedMatrixBlock.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/CompressedMatrixBlock.java
@@ -830,7 +830,7 @@
 		// note: UC group never passed into this function
 		for(ColGroup grp : groups)
 			if(!(grp instanceof ColGroupUncompressed) && !(cacheDDC1 && grp instanceof ColGroupDDC1))
-				((ColGroupValue) grp).unaryAggregateOperations(op, ret, rl, ru);
+				((ColGroup) grp).unaryAggregateOperations(op, ret, rl, ru);
 	}
 
 	@Override
@@ -1058,7 +1058,7 @@
 		result.reset();
 		// delegate matrix-vector operation to each column group
 		for(ColGroup grp : colGroups)
-			((ColGroupValue) grp).leftMultByRowVector(vector, result);
+			grp.leftMultByRowVector(vector, result);
 		// post-processing
 		result.recomputeNonZeros();
 	}
diff --git a/src/main/java/org/apache/sysds/runtime/compress/CompressionSettings.java b/src/main/java/org/apache/sysds/runtime/compress/CompressionSettings.java
index 892027f..3deb168 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/CompressionSettings.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/CompressionSettings.java
@@ -52,6 +52,8 @@
 	// Investigate the estimate.
 	public final boolean investigateEstimate;
 
+	public final boolean lossy;
+
 	// Removed the option of LOW_LEVEL_OPT, (only effecting OLE and RLE.)
 	// public final boolean LOW_LEVEL_OPT;
 
@@ -67,10 +69,9 @@
 		this.seed = seed;
 		this.investigateEstimate = investigateEstimate;
 		this.validCompressions = validCompressions;
+		this.lossy = validCompressions.contains(CompressionType.QUAN);
 	}
 
-
-
 	@Override
 	public String toString() {
 		StringBuilder sb = new StringBuilder();
diff --git a/src/main/java/org/apache/sysds/runtime/compress/CompressionSettingsBuilder.java b/src/main/java/org/apache/sysds/runtime/compress/CompressionSettingsBuilder.java
index 9285249..7de49c2 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/CompressionSettingsBuilder.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/CompressionSettingsBuilder.java
@@ -41,6 +41,7 @@
 		validCompressions.add(CompressionType.OLE);
 		validCompressions.add(CompressionType.RLE);
 		validCompressions.add(CompressionType.UNCOMPRESSED);
+		validCompressions.add(CompressionType.QUAN);
 	}
 	
 	public CompressionSettingsBuilder copySettings(CompressionSettings that){
diff --git a/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroup.java b/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroup.java
index bcf15ee..d0e269f 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroup.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroup.java
@@ -34,8 +34,8 @@
 import org.apache.sysds.runtime.matrix.operators.ScalarOperator;
 
 /**
- * Class that stores information about a column group within a compressed matrix block. There are subclasses specific to
- * each compression type.
+ * Class that stores information about a column group within a compressed matrix
+ * block. There are subclasses specific to each compression type.
  */
 public abstract class ColGroup implements Serializable {
 	protected static final Log LOG = LogFactory.getLog(ColGroup.class.getName());
@@ -44,26 +44,30 @@
 	/**
 	 * Public Group types supported
 	 * 
-	 * Note For instance DDC is called DDC not DDC1, or DDC2 which is a specific subtype of the DDC.
+	 * Note For instance DDC is called DDC not DDC1, or DDC2 which is a specific
+	 * subtype of the DDC.
 	 */
 	public enum CompressionType {
 		UNCOMPRESSED, // uncompressed sparse/dense
 		RLE, // RLE bitmap
 		OLE, // OLE bitmap
 		DDC, // Dictionary encoding
-		// QUANTIZE, // Quantize the double values to int 8.
+		QUAN, // Quantize the double values to short
 	}
 
 	/**
 	 * Concrete ColGroupType
 	 * 
-	 * Protected such that outside the ColGroup package it should be unknown which specific subtype is used.
+	 * Protected such that outside the ColGroup package it should be unknown which
+	 * specific subtype is used.
 	 */
 	protected enum ColGroupType {
 		UNCOMPRESSED, // uncompressed sparse/dense
 		RLE, // RLE bitmap
 		OLE, // OLE bitmap
-		DDC1, DDC2,
+		DDC1, // DDC Small Dictionary
+		DDC2, // DDC Large Dictionary
+		QUAN8S, // Quantized Value.
 	}
 
 	/** The ColGroup Indexes 0 offset, contained in the ColGroup */
@@ -75,7 +79,9 @@
 	/** Number of rows in the matrix, for use by child classes. */
 	protected int _numRows;
 
-	/** Empty constructor, used for serializing into an empty new object of ColGroup. */
+	/**
+	 * Empty constructor, used for serializing into an empty new object of ColGroup.
+	 */
 	protected ColGroup() {
 		this._colIndexes = null;
 		this._numRows = -1;
@@ -84,17 +90,18 @@
 	/**
 	 * Main constructor.
 	 * 
-	 * @param colIndices offsets of the columns in the matrix block that make up the group
+	 * @param colIndices offsets of the columns in the matrix block that make up the
+	 *                   group
 	 * @param numRows    total number of rows in the block
 	 */
 	protected ColGroup(int[] colIndices, int numRows) {
-		if(colIndices == null) {
+		if (colIndices == null) {
 			throw new DMLRuntimeException("null input to ColGroup is invalid");
 		}
-		if(colIndices.length == 0) {
+		if (colIndices.length == 0) {
 			throw new DMLRuntimeException("0 is an invalid number of columns in a ColGroup");
 		}
-		if(numRows < 1) {
+		if (numRows < 1) {
 			throw new DMLRuntimeException(numRows + " is an invalid number of rows in a ColGroup");
 		}
 		_colIndexes = colIndices;
@@ -146,31 +153,35 @@
 	public abstract CompressionType getCompType();
 
 	/**
-	 * Internally get the specific type of ColGroup, this could be extracted from the object but that does not allow for
-	 * nice switches in the code.
+	 * Internally get the specific type of ColGroup, this could be extracted from
+	 * the object but that does not allow for nice switches in the code.
 	 * 
 	 * @return ColGroupType of the object.
 	 */
 	protected abstract ColGroupType getColGroupType();
 
 	public void shiftColIndices(int offset) {
-		for(int i = 0; i < _colIndexes.length; i++)
+		for (int i = 0; i < _colIndexes.length; i++)
 			_colIndexes[i] += offset;
 	}
 
 	/**
-	 * Note: Must be overridden by child classes to account for additional data and metadata
+	 * Note: Must be overridden by child classes to account for additional data and
+	 * metadata
 	 * 
-	 * @return an upper bound on the number of bytes used to store this ColGroup in memory.
+	 * @return an upper bound on the number of bytes used to store this ColGroup in
+	 *         memory.
 	 */
 	public long estimateInMemorySize() {
 		return ColGroupSizes.estimateInMemorySizeGroup(_colIndexes.length);
 	}
 
 	/**
-	 * Decompress the contents of this column group into the specified full matrix block.
+	 * Decompress the contents of this column group into the specified full matrix
+	 * block.
 	 * 
-	 * @param target a matrix block where the columns covered by this column group have not yet been filled in.
+	 * @param target a matrix block where the columns covered by this column group
+	 *               have not yet been filled in.
 	 * @param rl     row lower
 	 * @param ru     row upper
 	 */
@@ -179,9 +190,10 @@
 	/**
 	 * Decompress the contents of this column group into uncompressed packed columns
 	 * 
-	 * @param target          a dense matrix block. The block must have enough space to hold the contents of this column
-	 *                        group.
-	 * @param colIndexTargets array that maps column indices in the original matrix block to columns of target.
+	 * @param target          a dense matrix block. The block must have enough space
+	 *                        to hold the contents of this column group.
+	 * @param colIndexTargets array that maps column indices in the original matrix
+	 *                        block to columns of target.
 	 */
 	public abstract void decompressToBlock(MatrixBlock target, int[] colIndexTargets);
 
@@ -232,7 +244,8 @@
 	}
 
 	/**
-	 * Returns the exact serialized size of column group. This can be used for example for buffer preallocation.
+	 * Returns the exact serialized size of column group. This can be used for
+	 * example for buffer preallocation.
 	 * 
 	 * @return exact serialized size for column group
 	 */
@@ -248,27 +261,76 @@
 	public abstract double get(int r, int c);
 
 	/**
-	 * Multiply the slice of the matrix that this column group represents by a vector on the right.
+	 * Multiply the slice of the matrix that this column group represents by a
+	 * vector on the right. Get the number of values. contained inside the ColGroup.
+	 * 
+	 * @return value at the row/column position
+	 */
+	// public abstract long getValuesSize();
+
+	/**
+	 * Returns the ColGroup as a MatrixBlock. Used as a fall back solution in case a
+	 * operation is not supported. Use in connection to getIfCountsType to get if
+	 * the values are repeated.
+	 * 
+	 * @return Matrix Block of the contained Values. Possibly contained in groups.
+	 */
+	public abstract MatrixBlock getValuesAsBlock();
+
+	/**
+	 * Returns true if in the getValuesAsBlock method returns values in groups (that
+	 * needs to be counted) or individually potentially repeated values
+	 * 
+	 * @return boolean
+	 */
+	public abstract boolean getIfCountsType();
+
+	/**
+	 * Returns the counts of values inside the MatrixBlock returned in
+	 * getValuesAsBlock Throws an exception if the getIfCountsType is false
+	 * 
+	 * @return the count of each value in the MatrixBlock.
+	 */
+	public abstract int[] getCounts();
+
+	/**
+	 * Returns the counts of values inside the MatrixBlock returned in
+	 * getValuesAsBlock Throws an exception if the getIfCountsType is false
+	 * 
+	 * @param includeZero Boolean to specify if zero should be included in the
+	 *                    count.
+	 * @return the count of each value in the MatrixBlock.
+	 */
+	public abstract int[] getCounts(boolean includeZero);
+
+	/**
+	 * Multiply the slice of the matrix that this column group represents by a
+	 * vector on the right.
 	 * 
 	 * @param vector vector to multiply by (tall vector)
 	 * @param result accumulator for holding the result
 	 * @param rl     row lower
-	 * @param ru     row upper if the internal SystemML code that performs the multiplication experiences an error
+	 * @param ru     row upper if the internal SystemML code that performs the
+	 *               multiplication experiences an error
 	 */
 	public abstract void rightMultByVector(MatrixBlock vector, MatrixBlock result, int rl, int ru);
 
 	/**
-	 * Multiply the slice of the matrix that this column group represents by a row vector on the left (the original
-	 * column vector is assumed to be transposed already i.e. its size now is 1xn).
+	 * Multiply the slice of the matrix that this column group represents by a row
+	 * vector on the left (the original column vector is assumed to be transposed
+	 * already i.e. its size now is 1xn).
 	 * 
 	 * @param vector row vector
 	 * @param result matrix block result
 	 */
 	public abstract void leftMultByRowVector(MatrixBlock vector, MatrixBlock result);
 
+	// additional vector-matrix multiplication to avoid DDC uncompression
+	public abstract void leftMultByRowVector(ColGroupDDC vector, MatrixBlock result);
+
 	/**
-	 * Perform the specified scalar operation directly on the compressed column group, without decompressing individual
-	 * cells if possible.
+	 * Perform the specified scalar operation directly on the compressed column
+	 * group, without decompressing individual cells if possible.
 	 * 
 	 * @param op operation to perform
 	 * @return version of this column group with the operation applied
@@ -276,15 +338,26 @@
 	public abstract ColGroup scalarOperation(ScalarOperator op);
 
 	/**
-	 * Unary Aggregate operator, since aggregate operators require new object output, the output becomes an uncompressed
-	 * matrix.
+	 * Unary Aggregate operator, since aggregate operators require new object
+	 * output, the output becomes an uncompressed matrix.
 	 * 
 	 * @param op     The operator used
-	 * @param result the output matrix block.
+	 * @param result The output matrix block.
 	 */
 	public abstract void unaryAggregateOperations(AggregateUnaryOperator op, MatrixBlock result);
 
 	/**
+	 * Unary Aggregate operator, since aggregate operators require new object
+	 * output, the output becomes an uncompressed matrix.
+	 * 
+	 * @param op     The operator used
+	 * @param result The output matrix block.
+	 * @param rl     The Starting Row to do aggregation from
+	 * @param ru     The last Row to do aggregation to (not included)
+	 */
+	public abstract void unaryAggregateOperations(AggregateUnaryOperator op, MatrixBlock result, int rl, int ru);
+
+	/**
 	 * Create a column group iterator for a row index range.
 	 * 
 	 * @param rl        row lower index, inclusive
@@ -296,8 +369,8 @@
 	public abstract Iterator<IJV> getIterator(int rl, int ru, boolean inclZeros, boolean rowMajor);
 
 	/**
-	 * Create a dense row iterator for a row index range. This iterator implies the inclusion of zeros and row-major
-	 * iteration order.
+	 * Create a dense row iterator for a row index range. This iterator implies the
+	 * inclusion of zeros and row-major iteration order.
 	 * 
 	 * @param rl row lower index, inclusive
 	 * @param ru row upper index, exclusive
@@ -315,8 +388,8 @@
 	public abstract void countNonZerosPerRow(int[] rnnz, int rl, int ru);
 
 	/**
-	 * Base class for column group row iterators. We do not implement the default Iterator interface in order to avoid
-	 * unnecessary value copies per group.
+	 * Base class for column group row iterators. We do not implement the default
+	 * Iterator interface in order to avoid unnecessary value copies per group.
 	 */
 	protected abstract class ColGroupRowIterator {
 		public abstract void next(double[] buff, int rowIx, int segIx, boolean last);
diff --git a/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupDDC.java b/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupDDC.java
index 825836b..0f975d5 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupDDC.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupDDC.java
@@ -22,6 +22,7 @@
 import java.util.Arrays;
 import java.util.Iterator;
 
+import org.apache.commons.lang.NotImplementedException;
 import org.apache.sysds.runtime.compress.UncompressedBitmap;
 import org.apache.sysds.runtime.functionobjects.Builtin;
 import org.apache.sysds.runtime.functionobjects.KahanFunction;
@@ -82,11 +83,12 @@
 
 	@Override
 	public void decompressToBlock(MatrixBlock target, int colpos) {
-		int nrow = getNumRows();
-		for(int i = 0; i < nrow; i++) {
-			double cellVal = getData(i, colpos);
-			target.quickSetValue(i, 0, cellVal);
-		}
+		throw new NotImplementedException("Old Function Not In use");
+		// int nrow = getNumRows();
+		// for(int i = 0; i < nrow; i++) {
+		// 	double cellVal = getData(i, colpos);
+		// 	target.quickSetValue(i, 0, cellVal);
+		// }
 	}
 
 	@Override
diff --git a/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupFactory.java b/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupFactory.java
index cc2009a..e33cb3e 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupFactory.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupFactory.java
@@ -234,6 +234,8 @@
 				return new ColGroupOLE(colIndexes, rlen, ubm);
 			case UNCOMPRESSED:
 				return new ColGroupUncompressed(colIndexes, rawMatrixBlock, compSettings);
+			case QUAN:
+				return new ColGroupQuan(colIndexes, rlen, ubm);
 			default:
 				throw new DMLCompressionException("Not implemented ColGroup Type compressed in factory.");
 		}
diff --git a/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupIO.java b/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupIO.java
index b1c7502..f72e307 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupIO.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupIO.java
@@ -73,8 +73,11 @@
 				case DDC2:
 					grp = new ColGroupDDC2();
 					break;
+				case QUAN8S:
+					grp = new ColGroupQuan();
+					break;
 				default:
-					throw new DMLRuntimeException("Unsupported ColGroup Type used");
+					throw new DMLRuntimeException("Unsupported ColGroup Type used:  "  + ctype);
 			}
 
 			// Deserialize and add column group (flag for shared dictionary passed
@@ -113,7 +116,7 @@
 		for(ColGroup grp : _colGroups) {
 			// TODO save DDC Dict sharing smarter.
 			boolean shared = (grp instanceof ColGroupDDC1 && _sharedDDC1Dict && grp.getNumCols() == 1);
-			out.writeByte(grp.getCompType().ordinal());
+			out.writeByte(grp.getColGroupType().ordinal());
 			grp.write(out, skipDict & shared); 
 			skipDict |= shared;
 		}
diff --git a/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupQuan.java b/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupQuan.java
new file mode 100644
index 0000000..16638d2
--- /dev/null
+++ b/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupQuan.java
@@ -0,0 +1,513 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.sysds.runtime.compress.colgroup;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.DoubleSummaryStatistics;
+import java.util.Iterator;
+
+import org.apache.commons.lang.NotImplementedException;
+import org.apache.sysds.runtime.DMLCompressionException;
+import org.apache.sysds.runtime.DMLRuntimeException;
+import org.apache.sysds.runtime.DMLScriptException;
+import org.apache.sysds.runtime.compress.UncompressedBitmap;
+import org.apache.sysds.runtime.functionobjects.Builtin;
+import org.apache.sysds.runtime.functionobjects.Builtin.BuiltinCode;
+import org.apache.sysds.runtime.functionobjects.KahanPlus;
+import org.apache.sysds.runtime.functionobjects.KahanPlusSq;
+import org.apache.sysds.runtime.functionobjects.Multiply;
+import org.apache.sysds.runtime.functionobjects.ReduceAll;
+import org.apache.sysds.runtime.functionobjects.ReduceCol;
+import org.apache.sysds.runtime.functionobjects.ReduceRow;
+import org.apache.sysds.runtime.matrix.data.IJV;
+import org.apache.sysds.runtime.matrix.data.MatrixBlock;
+import org.apache.sysds.runtime.matrix.operators.AggregateUnaryOperator;
+import org.apache.sysds.runtime.matrix.operators.ScalarOperator;
+
+public class ColGroupQuan extends ColGroup {
+
+	private static final long serialVersionUID = -9157476271360522008L;
+
+	protected double _scale;
+	protected byte[] _values;
+
+	protected ColGroupQuan() {
+		super();
+	}
+
+	protected ColGroupQuan(int[] colIndexes, int numRows, UncompressedBitmap ubm) {
+		super(colIndexes, numRows);
+		_values = new byte[ubm.getNumColumns() * numRows];
+
+		double[] valuesFullPrecision = ubm.getValues();
+		DoubleSummaryStatistics stat = Arrays.stream(valuesFullPrecision).summaryStatistics();
+		double max = Math.abs(Math.max(stat.getMax(), Math.abs(stat.getMin())));
+		if(Double.isInfinite(max)){
+			throw new DMLCompressionException("Invalid ColGroupQuan, can't quantize Infinite value.");
+		} else if (max == 0){
+			_scale = 1;
+			LOG.error("ColGroup! column with only 0 values good excuse to make new ColGroup");
+		} else{
+			_scale = max / (double) (Byte.MAX_VALUE);
+		}
+		for (int i = 0; i < valuesFullPrecision.length; i++) {
+			int[] runs = ubm.getOffsetsList(i).extractValues();
+			double curV = valuesFullPrecision[i];
+			double scaledVal = curV / _scale;
+			if(Double.isNaN(scaledVal) || Double.isInfinite(scaledVal)){
+				throw new DMLRuntimeException("Something went wrong in scaling values");
+			}
+			byte scaledValQuan = (byte) (scaledVal);
+			for (int j = 0; j < ubm.getOffsetsList(i).size(); j++) {
+				_values[runs[j]] = scaledValQuan;
+			}
+		}
+	}
+
+	@Override
+	public boolean getIfCountsType(){
+		return false;
+	}
+
+	private ColGroupQuan(int[] colIndexes, double scale, byte[] values) {
+		super(colIndexes, values.length / colIndexes.length);
+		this._scale = scale;
+		this._values = values;
+	}
+
+	@Override
+	public CompressionType getCompType() {
+		return CompressionType.QUAN;
+	}
+
+	@Override
+	protected ColGroupType getColGroupType() {
+		return ColGroupType.QUAN8S;
+	}
+
+	@Override
+	public void decompressToBlock(MatrixBlock target, int rl, int ru) {
+		if (_values == null || _values.length == 0) {
+			return;
+		}
+		for (int row = rl; row < ru; row++) {
+			for (int colIx = 0; colIx < _colIndexes.length; colIx++) {
+				int col = _colIndexes[colIx];
+				byte qVal = _values[row * colIx + row];
+				double val = qVal * _scale;
+				target.quickSetValue(row, col, val);
+			}
+		}
+	}
+
+	@Override
+	public void decompressToBlock(MatrixBlock target, int[] colIndexTargets) {
+		if (_values == null || _values.length == 0) {
+			return;
+		}
+		for (int row = 0; row < _numRows; row++) {
+			for (int colIx = 0; colIx < _colIndexes.length; colIx++) {
+				int col = _colIndexes[colIx];
+				double val = _values[row * colIx + row] * _scale;
+				target.quickSetValue(row, col, val);
+			}
+		}
+	}
+
+	@Override
+	public void decompressToBlock(MatrixBlock target, int colpos) {
+		if (_values == null || _values.length == 0)
+			return;
+
+		/**
+		 * target.getDenseBlockValues() because this decompress is used for
+		 * TransposeSelfMatrixMult meaning that the result is allocated directly into
+		 * the result row or col matrix with the same code !
+		 */
+		// double[] c = target.getDenseBlockValues();
+
+		// for (int row = 0; row < _numRows; row++) {
+		// c[row] = (double)_values[row * colpos + row] * _scale;
+		// }
+		// target.setNonZeros(_numRows);
+
+		double[] c = target.getDenseBlockValues();
+		int nnz = 0;
+
+		for (int row = 0; row < _numRows; row++) {
+			double val = _values[row * colpos + row];
+			if (val != 0) {
+				nnz++;
+			}
+			c[row] = val * _scale;
+		}
+		target.setNonZeros(nnz);
+	}
+
+	@Override
+	public void write(DataOutput out) throws IOException {
+
+		out.writeInt(_numRows);
+		out.writeInt(_colIndexes.length);
+
+		for (int i = 0; i < _colIndexes.length; i++)
+			out.writeInt(_colIndexes[i]);
+
+		for (int i = 0; i < _values.length; i++)
+			out.writeByte(_values[i]);
+
+		out.writeDouble(_scale);
+	}
+
+	@Override
+	public void readFields(DataInput in) throws IOException {
+		_numRows = in.readInt();
+		int numCols = in.readInt();
+
+		_colIndexes = new int[numCols];
+		for (int i = 0; i < _colIndexes.length; i++)
+			_colIndexes[i] = in.readInt();
+
+		_values = new byte[_numRows * numCols];
+		for (int i = 0; i < _values.length; i++)
+			_values[i] = in.readByte();
+
+		_scale = in.readDouble();
+	}
+
+	@Override
+	public long getExactSizeOnDisk() {
+		long ret = 8; // header
+		ret += 4 * _colIndexes.length;
+		ret += _values.length;
+		return ret;
+	}
+
+	@Override
+	public double get(int r, int c) {
+		int colIx = Arrays.binarySearch(_colIndexes, c);
+		return _values[r * colIx + r] * _scale;
+	}
+
+	@Override
+	public void rightMultByVector(MatrixBlock vector, MatrixBlock result, int rl, int ru) {
+		double[] b = ColGroupConverter.getDenseVector(vector);
+		double[] c = result.getDenseBlockValues();
+
+		// prepare reduced rhs w/ relevant values
+		double[] sb = new double[_colIndexes.length];
+		for (int j = 0; j < _colIndexes.length; j++) {
+			sb[j] = b[_colIndexes[j]];
+		}
+
+		for (int row = rl; row < ru; row++) {
+			for (int colIx = 0; colIx < _colIndexes.length; colIx++) {
+				c[row] += (_values[row * colIx + row] * _scale) * sb[colIx];
+			}
+		}
+	}
+
+	@Override
+	public void leftMultByRowVector(MatrixBlock vector, MatrixBlock result) {
+		double[] a = ColGroupConverter.getDenseVector(vector);
+		double[] c = result.getDenseBlockValues();
+
+		for (int row = 0; row < _numRows; row++) {
+			double val = _values[row] * _scale;
+			for (int col = 0; col < _colIndexes.length; col++) {
+				double value = val * a[row * col + row];
+				c[_colIndexes[col]] += value;
+			}
+		}
+
+	}
+
+	@Override
+	public void leftMultByRowVector(ColGroupDDC vector, MatrixBlock result) {
+		throw new NotImplementedException();
+	}
+
+	@Override
+	public ColGroup scalarOperation(ScalarOperator op) {
+		if (op.fn instanceof Multiply) {
+			return new ColGroupQuan(_colIndexes, op.executeScalar(_scale), _values);
+		}
+		double[] temp = new double[_values.length];
+		double max = op.executeScalar((double)_values[0] * _scale);
+		temp[0] = max;
+		for (int i = 1; i < _values.length; i++) {
+			temp[i] = op.executeScalar((double)_values[i] * _scale);
+			double absTemp = Math.abs(temp[i]);
+			if (absTemp > max) {
+				max = absTemp;
+			}
+		}
+		byte[] newValues = new byte[_values.length];
+		double newScale = max / (double) (Byte.MAX_VALUE);
+		for (int i = 0; i < _values.length; i++) {
+			newValues[i] = (byte) ((double)temp[i] / newScale);
+		}
+
+		return new ColGroupQuan(_colIndexes, newScale, newValues);
+	}
+
+	@Override
+	public void unaryAggregateOperations(AggregateUnaryOperator op, MatrixBlock result) {
+		unaryAggregateOperations(op, result, 0, getNumRows());
+	}
+
+	@Override
+	public void unaryAggregateOperations(AggregateUnaryOperator op, MatrixBlock result, int rl, int ru) {
+
+		if (op.aggOp.increOp.fn instanceof KahanPlus) {
+
+			// Not using KahnObject because we already lost some of that precision anyway in
+			// quantization.
+			if (op.indexFn instanceof ReduceAll)
+				computeSum(result);
+			else if (op.indexFn instanceof ReduceCol)
+				computeRowSums(result, rl, ru);
+			else if (op.indexFn instanceof ReduceRow)
+				computeColSums(result);
+		} else if (op.aggOp.increOp.fn instanceof KahanPlusSq) {
+			if (op.indexFn instanceof ReduceAll)
+				computeSumSq(result);
+			else if (op.indexFn instanceof ReduceCol)
+				computeRowSumsSq(result, rl, ru);
+			else if (op.indexFn instanceof ReduceRow)
+				computeColSumsSq(result);
+		} else if (op.aggOp.increOp.fn instanceof Builtin
+				&& (((Builtin) op.aggOp.increOp.fn).getBuiltinCode() == BuiltinCode.MAX
+						|| ((Builtin) op.aggOp.increOp.fn).getBuiltinCode() == BuiltinCode.MIN)) {
+			Builtin builtin = (Builtin) op.aggOp.increOp.fn;
+			// min and max (reduceall/reducerow over tuples only)
+
+			if (op.indexFn instanceof ReduceAll)
+				computeMxx(result, builtin, _zeros);
+			else if (op.indexFn instanceof ReduceCol)
+				computeRowMxx(result, builtin, rl, ru);
+			else if (op.indexFn instanceof ReduceRow)
+				computeColMxx(result, builtin, _zeros);
+		} else {
+			throw new DMLScriptException("Unknown UnaryAggregate operator on CompressedMatrixBlock");
+		}
+	}
+
+	protected void computeSum(MatrixBlock result) {
+		// TODO Potential speedup use vector instructions/group in batches of 32
+		long sum = 0L;
+		for (int i = 0; i < _values.length; i++) {
+			sum += (long) _values[i];
+		}
+		result.quickSetValue(0, 0, result.getValue(0, 0) + (double) sum * _scale);
+	}
+
+	protected void computeSumSq(MatrixBlock result) {
+
+		double sumsq = 0;
+		for (int i = 0; i < _values.length; i++) {
+			double v =  _values[i] * _scale;
+			sumsq += v*v;
+		}
+		result.quickSetValue(0, 0, result.getValue(0, 0) + sumsq);
+	}
+
+	/**
+	 * Row sums for the row range [rl, ru). Values are stored column-major:
+	 * entry (row, col) lives at colIx * _numRows + row (the same layout
+	 * computeColSums relies on via i / _numRows). The previous index
+	 * expression (row * colIx + row) read the wrong cells.
+	 */
+	protected void computeRowSums(MatrixBlock result, int rl, int ru) {
+		if (_colIndexes.length < 256) {
+			// < 256 columns of byte values (|v| <= 128) fits a short accumulator.
+			short[] rowSums = new short[ru - rl];
+			for (int colIx = 0; colIx < _colIndexes.length; colIx++) {
+				int off = colIx * _numRows;
+				for (int row = rl; row < ru; row++) {
+					rowSums[row - rl] += _values[off + row];
+				}
+			}
+			for (int row = rl; row < ru; row++) {
+				result.quickSetValue(row, 0, result.getValue(row, 0) + (double) rowSums[row - rl] * _scale);
+			}
+		} else {
+			throw new NotImplementedException("Not Implemented number of columns in ColGroupQuan row sum");
+		}
+	}
+
+	/**
+	 * Row sums of squares for the row range [rl, ru). Values are stored
+	 * column-major: entry (row, col) lives at colIx * _numRows + row; the
+	 * previous index expression (row * colIx + row) read the wrong cells.
+	 * A float accumulator is kept (as before) for memory, at some precision cost.
+	 */
+	protected void computeRowSumsSq(MatrixBlock result, int rl, int ru) {
+		if (_colIndexes.length < 256) {
+			float[] rowSumSq = new float[ru - rl];
+			for (int colIx = 0; colIx < _colIndexes.length; colIx++) {
+				int off = colIx * _numRows;
+				for (int row = rl; row < ru; row++) {
+					double v = (double) _values[off + row] * _scale;
+					rowSumSq[row - rl] += v * v;
+				}
+			}
+
+			for (int row = rl; row < ru; row++) {
+				result.quickSetValue(row, 0, result.getValue(row, 0) + rowSumSq[row - rl]);
+			}
+
+		} else {
+			throw new NotImplementedException("Not Implemented number of columns in ColGroupQuan row sum");
+		}
+	}
+
+	/**
+	 * Column sums. Values are stored column-major, so flat index i maps to
+	 * column i / _numRows. The accumulator type is tiered by row count to
+	 * save memory: short for fewer than 256 rows (255 * 127 = 32385 fits a
+	 * short), int for fewer than 2^24 rows ((2^24 - 1) * 127 still fits an
+	 * int), and double otherwise.
+	 */
+	protected void computeColSums(MatrixBlock result) {
+		if (_numRows < 256) {
+			short[] colSums = new short[_colIndexes.length];
+			for (int i = 0; i < _values.length; i++) {
+				colSums[i / _numRows] += _values[i];
+			}
+
+			// De-quantize once per column when writing out.
+			for (int col = 0; col < _colIndexes.length; col++) {
+				result.quickSetValue(0, _colIndexes[col], colSums[col] * _scale);
+			}
+		} else if (_numRows < 16777216) { // (Int max + 1) / (short max + 1)
+			int[] colSums = new int[_colIndexes.length];
+			for (int i = 0; i < _values.length; i++) {
+				colSums[i / _numRows] += _values[i];
+			}
+
+			for (int col = 0; col < _colIndexes.length; col++) {
+				result.quickSetValue(0, _colIndexes[col], colSums[col] * _scale);
+			}
+		} else {
+			double[] colSums = new double[_colIndexes.length];
+			for (int i = 0; i < _values.length; i++) {
+				colSums[i / _numRows] += _values[i];
+			}
+
+			for (int col = 0; col < _colIndexes.length; col++) {
+				result.quickSetValue(0, _colIndexes[col], colSums[col] * _scale);
+			}
+		}
+	}
+
+	protected void computeColSumsSq(MatrixBlock result) {
+	
+		double[] sumsq = new double[_colIndexes.length];
+		for (int i = 0; i < _values.length; i++) {
+			double v =  _values[i] * _scale;
+			sumsq[i / _numRows] += v*v;
+		}
+		
+		for (int col = 0; col < _colIndexes.length; col++) {
+			result.quickSetValue(0, _colIndexes[col], sumsq[col]);
+		}
+		
+	}
+
+	/**
+	 * Row-wise min/max for rows [rl, ru), folded into the dense output via the
+	 * given builtin. Values are stored column-major: entry (row, col) lives at
+	 * colIx * _numRows + row; the previous index expression
+	 * (row * colIx + row) read the wrong cells.
+	 */
+	protected void computeRowMxx(MatrixBlock result, Builtin builtin, int rl, int ru) {
+		double[] c = result.getDenseBlockValues();
+		for (int colIx = 0; colIx < _colIndexes.length; colIx++) {
+			int off = colIx * _numRows;
+			for (int row = rl; row < ru; row++) {
+				double v = ((double) _values[off + row]) * _scale;
+				c[row] = builtin.execute(c[row], v);
+			}
+		}
+	}
+
+	/**
+	 * Overall min/max via the given builtin. Seeds the accumulator with 0 only
+	 * when the group contains zeros; otherwise seeds with the first
+	 * de-quantized value, so a MIN over all-positive (or MAX over
+	 * all-negative) data is not biased toward 0.
+	 * NOTE(review): the result cell is overwritten, not folded with a prior
+	 * partial result -- confirm this matches the multi-group aggregation protocol.
+	 */
+	protected void computeMxx(MatrixBlock result, Builtin builtin, boolean zeros) {
+		double res = (zeros || _values.length == 0) ? 0 : _values[0] * _scale;
+		for (int i = 0; i < _values.length; i++) {
+			res = builtin.execute(res, _values[i] * _scale);
+		}
+		result.quickSetValue(0, 0, res);
+	}
+
+	/**
+	 * Column-wise min/max via the given builtin. Each column accumulator is
+	 * seeded with 0 only when the group contains zeros; otherwise with the
+	 * column's first de-quantized value (column c occupies flat indices
+	 * [c * _numRows, (c + 1) * _numRows)), so results are not biased toward 0.
+	 * NOTE(review): assumes 'zeros' flags implicit zero entries -- confirm
+	 * against callers.
+	 */
+	protected void computeColMxx(MatrixBlock result, Builtin builtin, boolean zeros) {
+		double[] colRes = new double[_colIndexes.length];
+		if (!zeros && _numRows > 0) {
+			for (int col = 0; col < _colIndexes.length; col++)
+				colRes[col] = _values[col * _numRows] * _scale;
+		}
+		for (int i = 0; i < _values.length; i++) {
+			colRes[i / _numRows] = builtin.execute(colRes[i / _numRows], _values[i] * _scale);
+		}
+
+		for (int col = 0; col < _colIndexes.length; col++) {
+			result.quickSetValue(0, _colIndexes[col], colRes[col]);
+		}
+	}
+
+	/**
+	 * Returns a cell iterator over this group. Currently a stub: the returned
+	 * iterator throws NotImplementedException from both methods, and the
+	 * rl/ru/inclZeros/rowMajor arguments are ignored.
+	 */
+	@Override
+	public Iterator<IJV> getIterator(int rl, int ru, boolean inclZeros, boolean rowMajor) {
+		return new QuanValueIterator();
+	}
+
+	/**
+	 * Placeholder cell iterator for the quantized column group; iteration is
+	 * not yet implemented, so both methods throw NotImplementedException.
+	 */
+	private class QuanValueIterator implements Iterator<IJV> {
+
+		@Override
+		public boolean hasNext() {
+			throw new NotImplementedException("Not Implemented");
+		}
+
+		@Override
+		public IJV next() {
+			throw new NotImplementedException("Not Implemented");
+		}
+
+	}
+
+	/**
+	 * Returns a row iterator over this group. Currently a stub: the returned
+	 * iterator throws NotImplementedException, and rl/ru are ignored.
+	 */
+	@Override
+	public ColGroupRowIterator getRowIterator(int rl, int ru) {
+
+		return new QuanRowIterator();
+	}
+
+	/**
+	 * Placeholder row iterator for the quantized column group; row-wise
+	 * iteration is not yet implemented.
+	 */
+	private class QuanRowIterator extends ColGroupRowIterator {
+
+		@Override
+		public void next(double[] buff, int rowIx, int segIx, boolean last) {
+			throw new NotImplementedException("Not Implemented");
+		}
+
+	}
+
+	/**
+	 * Counts non-zero quantized values per row in [rl, ru), adding into rnnz
+	 * (offset by rl). Values are stored column-major: entry (row, col) lives
+	 * at colIx * _numRows + row; the previous index expression
+	 * (row * colIx + row) read the wrong cells. Note values rounded to zero
+	 * by quantization count as zero here.
+	 */
+	@Override
+	public void countNonZerosPerRow(int[] rnnz, int rl, int ru) {
+		for (int row = rl; row < ru; row++) {
+			int lnnz = 0;
+			for (int colIx = 0; colIx < _colIndexes.length; colIx++) {
+				lnnz += (_values[colIx * _numRows + row] != 0) ? 1 : 0;
+			}
+			rnnz[row - rl] += lnnz;
+		}
+	}
+
+	/**
+	 * Materializes the de-quantized values of this group as a freshly
+	 * allocated dense MatrixBlock of size _numRows x numCols, filled via
+	 * decompressToBlock.
+	 */
+	@Override
+	public MatrixBlock getValuesAsBlock() {
+		MatrixBlock target = new MatrixBlock(_numRows, _colIndexes.length, 0.0);
+		decompressToBlock(target, _colIndexes);
+		return target;
+	}
+
+	/**
+	 * Not supported: a quantized group stores every cell and does not
+	 * materialize distinct-value counts. (Previous message was copy-pasted
+	 * from the Uncompressed group.)
+	 */
+	@Override
+	public int[] getCounts() {
+		throw new DMLCompressionException(
+				"Invalid function call, value counts are not materialized in a quantized (ColGroupQuan) group");
+	}
+
+	/**
+	 * Not supported: a quantized group stores every cell and does not
+	 * materialize distinct-value counts. (Previous message was copy-pasted
+	 * from the Uncompressed group.)
+	 */
+	@Override
+	public int[] getCounts(boolean includeZero) {
+		throw new DMLCompressionException(
+				"Invalid function call, value counts are not materialized in a quantized (ColGroupQuan) group");
+	}
+
+}
\ No newline at end of file
diff --git a/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupSizes.java b/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupSizes.java
index 0777e12..5b39ed2 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupSizes.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupSizes.java
@@ -130,4 +130,11 @@
 		size += MatrixBlock.estimateSizeInMemory(nrRows, nrColumns, sparsity);
 		return size;
 	}
+
+	/**
+	 * Memory estimate for a quantized (Q8) column group: base group overhead,
+	 * the 8-byte scale, and one byte per cell.
+	 */
+	public static long estimateInMemorySizeQuan(int nrRows, int nrColumns){
+		long size = estimateInMemorySizeGroup(nrColumns);
+		size += 8; // scale value
+		// Multiply in long: nrRows * nrColumns overflows int for large blocks.
+		size += MemoryEstimates.byteArrayCost((long) nrRows * nrColumns);
+		return size;
+	}
 }
\ No newline at end of file
diff --git a/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupUncompressed.java b/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupUncompressed.java
index 6d0d865..00e1563 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupUncompressed.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupUncompressed.java
@@ -26,6 +26,7 @@
 import java.util.Iterator;
 import java.util.List;
 
+import org.apache.commons.lang.NotImplementedException;
 import org.apache.sysds.runtime.DMLCompressionException;
 import org.apache.sysds.runtime.compress.CompressionSettings;
 import org.apache.sysds.runtime.data.SparseBlock;
@@ -40,16 +41,16 @@
 import org.apache.sysds.runtime.util.SortUtils;
 
 /**
- * Column group type for columns that are stored as dense arrays of doubles. Uses a MatrixBlock internally to store the
- * column contents.
+ * Column group type for columns that are stored as dense arrays of doubles.
+ * Uses a MatrixBlock internally to store the column contents.
  * 
  */
 public class ColGroupUncompressed extends ColGroup {
 	private static final long serialVersionUID = 4870546053280378891L;
 
 	/**
-	 * We store the contents of the columns as a MatrixBlock to take advantage of high-performance routines available
-	 * for this data structure.
+	 * We store the contents of the columns as a MatrixBlock to take advantage of
+	 * high-performance routines available for this data structure.
 	 */
 	private MatrixBlock _data;
 
@@ -64,11 +65,12 @@
 	/**
 	 * Main constructor for Uncompressed ColGroup.
 	 * 
-	 * @param colIndicesList Indices (relative to the current block) of the columns that this column group represents.
-	 * @param rawBlock       The uncompressed block; uncompressed data must be present at the time that the constructor
-	 *                       is called
-	 * @param compSettings   The Settings for how to compress this block, Here using information about the raw block if
-	 *                       it is transposed.
+	 * @param colIndicesList Indices (relative to the current block) of the columns
+	 *                       that this column group represents.
+	 * @param rawBlock       The uncompressed block; uncompressed data must be
+	 *                       present at the time that the constructor is called
+	 * @param compSettings   The Settings for how to compress this block, Here using
+	 *                       information about the raw block if it is transposed.
 	 */
 	public ColGroupUncompressed(int[] colIndicesList, MatrixBlock rawBlock, CompressionSettings compSettings) {
 		super(colIndicesList, compSettings.transposeInput ? rawBlock.getNumColumns() : rawBlock.getNumRows());
@@ -80,14 +82,14 @@
 		_data = new MatrixBlock(numRows, _colIndexes.length, rawBlock.isInSparseFormat());
 
 		// ensure sorted col indices
-		if(!SortUtils.isSorted(0, _colIndexes.length, _colIndexes))
+		if (!SortUtils.isSorted(0, _colIndexes.length, _colIndexes))
 			Arrays.sort(_colIndexes);
 
 		// special cases empty blocks
-		if(rawBlock.isEmptyBlock(false))
+		if (rawBlock.isEmptyBlock(false))
 			return;
 		// special cases full block
-		if(!compSettings.transposeInput && _data.getNumColumns() == rawBlock.getNumColumns()) {
+		if (!compSettings.transposeInput && _data.getNumColumns() == rawBlock.getNumColumns()) {
 			_data.copy(rawBlock);
 			return;
 		}
@@ -95,26 +97,27 @@
 		// dense implementation for dense and sparse matrices to avoid linear search
 		int m = numRows;
 		int n = _colIndexes.length;
-		for(int i = 0; i < m; i++) {
-			for(int j = 0; j < n; j++) {
-				double val = compSettings.transposeInput ?
-					rawBlock.quickGetValue(_colIndexes[j], i) :
-					rawBlock.quickGetValue(i, _colIndexes[j]);
+		for (int i = 0; i < m; i++) {
+			for (int j = 0; j < n; j++) {
+				double val = compSettings.transposeInput ? rawBlock.quickGetValue(_colIndexes[j], i)
+						: rawBlock.quickGetValue(i, _colIndexes[j]);
 				_data.appendValue(i, j, val);
 			}
 		}
 		_data.examSparsity();
 
 		// convert sparse MCSR to read-optimized CSR representation
-		if(_data.isInSparseFormat()) {
+		if (_data.isInSparseFormat()) {
 			_data = new MatrixBlock(_data, Type.CSR, false);
 		}
 	}
 
 	/**
-	 * Constructor for creating temporary decompressed versions of one or more compressed column groups.
+	 * Constructor for creating temporary decompressed versions of one or more
+	 * compressed column groups.
 	 * 
-	 * @param groupsToDecompress compressed columns to subsume. Must contain at least one element.
+	 * @param groupsToDecompress compressed columns to subsume. Must contain at
+	 *                           least one element.
 	 */
 	public ColGroupUncompressed(List<ColGroup> groupsToDecompress) {
 		super(mergeColIndices(groupsToDecompress), groupsToDecompress.get(0)._numRows);
@@ -122,20 +125,21 @@
 		// Invert the list of column indices
 		int maxColIndex = _colIndexes[_colIndexes.length - 1];
 		int[] colIndicesInverted = new int[maxColIndex + 1];
-		for(int i = 0; i < _colIndexes.length; i++) {
+		for (int i = 0; i < _colIndexes.length; i++) {
 			colIndicesInverted[_colIndexes[i]] = i;
 		}
 
 		// Create the buffer that holds the uncompressed data, packed together
 		_data = new MatrixBlock(_numRows, _colIndexes.length, false);
 
-		for(ColGroup colGroup : groupsToDecompress) {
+		for (ColGroup colGroup : groupsToDecompress) {
 			colGroup.decompressToBlock(_data, colIndicesInverted);
 		}
 	}
 
 	/**
-	 * Constructor for internal use. Used when a method needs to build an instance of this class from scratch.
+	 * Constructor for internal use. Used when a method needs to build an instance
+	 * of this class from scratch.
 	 * 
 	 * @param colIndices column mapping for this column group
 	 * @param numRows    number of rows in the column, for passing to the superclass
@@ -168,20 +172,21 @@
 	/**
 	 * Subroutine of constructor.
 	 * 
-	 * @param groupsToDecompress input to the constructor that decompresses into a temporary UncompressedColGroup
+	 * @param groupsToDecompress input to the constructor that decompresses into a
+	 *                           temporary UncompressedColGroup
 	 * @return a merged set of column indices across all those groups
 	 */
 	private static int[] mergeColIndices(List<ColGroup> groupsToDecompress) {
 		// Pass 1: Determine number of columns
 		int sz = 0;
-		for(ColGroup colGroup : groupsToDecompress) {
+		for (ColGroup colGroup : groupsToDecompress) {
 			sz += colGroup.getNumCols();
 		}
 
 		// Pass 2: Copy column offsets out
 		int[] ret = new int[sz];
 		int pos = 0;
-		for(ColGroup colGroup : groupsToDecompress) {
+		for (ColGroup colGroup : groupsToDecompress) {
 			int[] tmp = colGroup.getColIndices();
 			System.arraycopy(tmp, 0, ret, pos, tmp.length);
 			pos += tmp.length;
@@ -200,10 +205,10 @@
 	@Override
 	public void decompressToBlock(MatrixBlock target, int rl, int ru) {
 		// empty block, nothing to add to output
-		if(_data.isEmptyBlock(false))
+		if (_data.isEmptyBlock(false))
 			return;
-		for(int row = rl; row < ru; row++) {
-			for(int colIx = 0; colIx < _colIndexes.length; colIx++) {
+		for (int row = rl; row < ru; row++) {
+			for (int colIx = 0; colIx < _colIndexes.length; colIx++) {
 				int col = _colIndexes[colIx];
 				double cellVal = _data.quickGetValue(row, colIx);
 				target.quickSetValue(row, col, cellVal);
@@ -214,12 +219,12 @@
 	@Override
 	public void decompressToBlock(MatrixBlock target, int[] colIndexTargets) {
 		// empty block, nothing to add to output
-		if(_data.isEmptyBlock(false)) {
+		if (_data.isEmptyBlock(false)) {
 			return;
 		}
 		// Run through the rows, putting values into the appropriate locations
-		for(int row = 0; row < _data.getNumRows(); row++) {
-			for(int colIx = 0; colIx < _data.getNumColumns(); colIx++) {
+		for (int row = 0; row < _data.getNumRows(); row++) {
+			for (int colIx = 0; colIx < _data.getNumColumns(); colIx++) {
 				int origMatrixColIx = getColIndex(colIx);
 				int col = colIndexTargets[origMatrixColIx];
 				double cellVal = _data.quickGetValue(row, colIx);
@@ -231,11 +236,11 @@
 	@Override
 	public void decompressToBlock(MatrixBlock target, int colpos) {
 		// empty block, nothing to add to output
-		if(_data.isEmptyBlock(false)) {
+		if (_data.isEmptyBlock(false)) {
 			return;
 		}
 		// Run through the rows, putting values into the appropriate locations
-		for(int row = 0; row < _data.getNumRows(); row++) {
+		for (int row = 0; row < _data.getNumRows(); row++) {
 			double cellVal = _data.quickGetValue(row, colpos);
 			// Apparently rows are cols here.
 			target.quickSetValue(0, row, cellVal);
@@ -246,7 +251,7 @@
 	public double get(int r, int c) {
 		// find local column index
 		int ix = Arrays.binarySearch(_colIndexes, c);
-		if(ix < 0)
+		if (ix < 0)
 			throw new RuntimeException("Column index " + c + " not in uncompressed group.");
 
 		// uncompressed get value
@@ -261,7 +266,7 @@
 		MatrixBlock shortVector = new MatrixBlock(clen, 1, false);
 		shortVector.allocateDenseBlock();
 		double[] b = shortVector.getDenseBlockValues();
-		for(int colIx = 0; colIx < clen; colIx++)
+		for (int colIx = 0; colIx < clen; colIx++)
 			b[colIx] = vector.quickGetValue(_colIndexes[colIx], 0);
 		shortVector.recomputeNonZeros();
 
@@ -276,7 +281,7 @@
 		MatrixBlock shortVector = new MatrixBlock(clen, 1, false);
 		shortVector.allocateDenseBlock();
 		double[] b = shortVector.getDenseBlockValues();
-		for(int colIx = 0; colIx < clen; colIx++)
+		for (int colIx = 0; colIx < clen; colIx++)
 			b[colIx] = vector.quickGetValue(_colIndexes[colIx], 0);
 		shortVector.recomputeNonZeros();
 
@@ -290,22 +295,27 @@
 		LibMatrixMult.matrixMult(vector, _data, pret);
 
 		// copying partialResult to the proper indices of the result
-		if(!pret.isEmptyBlock(false)) {
+		if (!pret.isEmptyBlock(false)) {
 			double[] rsltArr = result.getDenseBlockValues();
-			for(int colIx = 0; colIx < _colIndexes.length; colIx++)
+			for (int colIx = 0; colIx < _colIndexes.length; colIx++)
 				rsltArr[_colIndexes[colIx]] = pret.quickGetValue(0, colIx);
 			result.recomputeNonZeros();
 		}
 	}
 
+	@Override
+	public void leftMultByRowVector(ColGroupDDC vector, MatrixBlock result) {
+		throw new NotImplementedException();
+	}
+
 	public void leftMultByRowVector(MatrixBlock vector, MatrixBlock result, int k) {
 		MatrixBlock pret = new MatrixBlock(1, _colIndexes.length, false);
 		LibMatrixMult.matrixMult(vector, _data, pret, k);
 
 		// copying partialResult to the proper indices of the result
-		if(!pret.isEmptyBlock(false)) {
+		if (!pret.isEmptyBlock(false)) {
 			double[] rsltArr = result.getDenseBlockValues();
-			for(int colIx = 0; colIx < _colIndexes.length; colIx++)
+			for (int colIx = 0; colIx < _colIndexes.length; colIx++)
 				rsltArr[_colIndexes[colIx]] = pret.quickGetValue(0, colIx);
 			result.recomputeNonZeros();
 		}
@@ -325,14 +335,14 @@
 		LibMatrixAgg.aggregateUnaryMatrix(_data, ret, op);
 
 		// shift result into correct column indexes
-		if(op.indexFn instanceof ReduceRow) {
+		if (op.indexFn instanceof ReduceRow) {
 			// shift partial results, incl corrections
-			for(int i = _colIndexes.length - 1; i >= 0; i--) {
+			for (int i = _colIndexes.length - 1; i >= 0; i--) {
 				double val = ret.quickGetValue(0, i);
 				ret.quickSetValue(0, i, 0);
 				ret.quickSetValue(0, _colIndexes[i], val);
-				if(op.aggOp.existsCorrection())
-					for(int j = 1; j < ret.getNumRows(); j++) {
+				if (op.aggOp.existsCorrection())
+					for (int j = 1; j < ret.getNumRows(); j++) {
 						double corr = ret.quickGetValue(j, i);
 						ret.quickSetValue(j, i, 0);
 						ret.quickSetValue(j, _colIndexes[i], corr);
@@ -342,6 +352,11 @@
 	}
 
 	@Override
+	public void unaryAggregateOperations(AggregateUnaryOperator op, MatrixBlock result, int rl, int ru) {
+		throw new NotImplementedException("Unimplemented Specific Sub ColGroup Aggregation Operation");
+	}
+
+	@Override
 	public void readFields(DataInput in) throws IOException {
 		// read col contents (w/ meta data)
 		_data = new MatrixBlock();
@@ -351,7 +366,7 @@
 		// read col indices
 		int numCols = _data.getNumColumns();
 		_colIndexes = new int[numCols];
-		for(int i = 0; i < numCols; i++)
+		for (int i = 0; i < numCols; i++)
 			_colIndexes[i] = in.readInt();
 	}
 
@@ -362,7 +377,7 @@
 
 		// write col indices
 		int len = _data.getNumColumns();
-		for(int i = 0; i < len; i++)
+		for (int i = 0; i < len; i++)
 			out.writeInt(_colIndexes[i]);
 	}
 
@@ -373,7 +388,7 @@
 
 	@Override
 	public void countNonZerosPerRow(int[] rnnz, int rl, int ru) {
-		for(int i = rl; i < ru; i++)
+		for (int i = rl; i < ru; i++)
 			rnnz[i - rl] += _data.recomputeNonZeros(i, i, 0, _data.getNumColumns() - 1);
 	}
 
@@ -409,7 +424,7 @@
 
 		@Override
 		public boolean hasNext() {
-			return(_rpos < _ru);
+			return (_rpos < _ru);
 		}
 
 		@Override
@@ -424,11 +439,10 @@
 				boolean nextRow = (_cpos + 1 >= getNumCols());
 				_rpos += nextRow ? 1 : 0;
 				_cpos = nextRow ? 0 : _cpos + 1;
-				if(_rpos >= _ru)
+				if (_rpos >= _ru)
 					return; // reached end
 				_value = _data.quickGetValue(_rpos, _cpos);
-			}
-			while(!_inclZeros && _value == 0);
+			} while (!_inclZeros && _value == 0);
 		}
 	}
 
@@ -440,22 +454,21 @@
 		@Override
 		public void next(double[] buff, int rowIx, int segIx, boolean last) {
 			// copy entire dense/sparse row
-			if(_data.isAllocated()) {
-				if(_data.isInSparseFormat()) {
-					if(!_data.getSparseBlock().isEmpty(rowIx)) {
+			if (_data.isAllocated()) {
+				if (_data.isInSparseFormat()) {
+					if (!_data.getSparseBlock().isEmpty(rowIx)) {
 						SparseBlock sblock = _data.getSparseBlock();
 						int apos = sblock.pos(rowIx);
 						int alen = sblock.size(rowIx);
 						int[] aix = sblock.indexes(rowIx);
 						double[] avals = sblock.values(rowIx);
-						for(int k = apos; k < apos + alen; k++)
+						for (int k = apos; k < apos + alen; k++)
 							buff[_colIndexes[aix[k]]] = avals[k];
 					}
-				}
-				else {
+				} else {
 					final int clen = getNumCols();
 					double[] a = _data.getDenseBlockValues();
-					for(int j = 0, aix = rowIx * clen; j < clen; j++)
+					for (int j = 0, aix = rowIx * clen; j < clen; j++)
 						buff[_colIndexes[j]] = a[aix + j];
 				}
 			}
@@ -470,4 +483,27 @@
 		sb.append(_data.toString());
 		return sb.toString();
 	}
+
+	@Override
+	public MatrixBlock getValuesAsBlock() {
+		return _data;
+	}
+
+	@Override
+	public boolean getIfCountsType() {
+		return false;
+	}
+
+	@Override
+	public int[] getCounts() {
+		throw new DMLCompressionException(
+				"Invalid function call, the counts in Uncompressed Col Group is always 1 for each value");
+	}
+
+	@Override
+	public int[] getCounts(boolean includeZero) {
+		throw new DMLCompressionException(
+				"Invalid function call, the counts in Uncompressed Col Group is always 1 for each value");
+	}
+
 }
diff --git a/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupValue.java b/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupValue.java
index c65ca82..7edda8f 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupValue.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupValue.java
@@ -129,6 +129,7 @@
 		_dict = dict;
 	}
 
+	@Override
 	public MatrixBlock getValuesAsBlock() {
 		boolean containsZeros = (this instanceof ColGroupOffset) ? ((ColGroupOffset) this)._zeros : false;
 		final double[] values = getValues();
@@ -152,6 +153,10 @@
 		return getCounts(rl, ru, tmp);
 	}
 
+	public boolean getIfCountsType(){
+		return true;
+	}
+
 	public abstract int[] getCounts(int rl, int ru, int[] out);
 
 	public int[] getCounts(boolean inclZeros) {
@@ -209,6 +214,7 @@
 		return ret;
 	}
 
+
 	protected final double sumValues(int valIx, double[] b) {
 		final int numCols = getNumCols();
 		final int valOff = valIx * numCols;
@@ -280,9 +286,6 @@
 			result.quickSetValue(0, _colIndexes[j], vals[j]);
 	}
 
-	// additional vector-matrix multiplication to avoid DDC uncompression
-	public abstract void leftMultByRowVector(ColGroupDDC vector, MatrixBlock result);
-
 	/**
 	 * Method for use by subclasses. Applies a scalar operation to the value metadata stored in the superclass.
 	 * 
@@ -308,13 +311,7 @@
 		unaryAggregateOperations(op, result, 0, getNumRows());
 	}
 
-	/**
-	 * 
-	 * @param op     aggregation operator
-	 * @param result output matrix block
-	 * @param rl     row lower index, inclusive
-	 * @param ru     row upper index, exclusive
-	 */
+	@Override
 	public void unaryAggregateOperations(AggregateUnaryOperator op, MatrixBlock result, int rl, int ru) {
 		// sum and sumsq (reduceall/reducerow over tuples and counts)
 		if(op.aggOp.increOp.fn instanceof KahanPlus || op.aggOp.increOp.fn instanceof KahanPlusSq) {
diff --git a/src/main/java/org/apache/sysds/runtime/compress/estim/CompressedSizeEstimator.java b/src/main/java/org/apache/sysds/runtime/compress/estim/CompressedSizeEstimator.java
index 842a890..4f73ff8 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/estim/CompressedSizeEstimator.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/estim/CompressedSizeEstimator.java
@@ -99,9 +99,12 @@
 			double compRatio = uncompSize / minCompressedSize;
 
 			if(compRatio > 1000) {
-
-				LOG.warn("\n\tVery good CompressionRatio: " + compRatio + "\n\tUncompressedSize: " + uncompSize
-					+ "\tCompressedSize: " + minCompressedSize + "\tType: " + sizeInfos[col].getBestCompressionType());
+				StringBuilder sb = new StringBuilder();
+				sb.append("Very good CompressionRatio: " +String.format("%10.1f", compRatio));
+				sb.append(" UncompressedSize: " + String.format("%14.0f",uncompSize));
+				sb.append(" tCompressedSize: " + String.format("%14.0f",minCompressedSize));
+				sb.append(" type: " + sizeInfos[col].getBestCompressionType());
+				LOG.warn(sb.toString());
 			}
 
 			if(compRatio > 1) {
diff --git a/src/main/java/org/apache/sysds/runtime/compress/estim/CompressedSizeInfoColGroup.java b/src/main/java/org/apache/sysds/runtime/compress/estim/CompressedSizeInfoColGroup.java
index ba66a81..7090ff8 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/estim/CompressedSizeInfoColGroup.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/estim/CompressedSizeInfoColGroup.java
@@ -121,6 +121,9 @@
 					fact.numCols,
 					((double) fact.numVals / (fact.numRows * fact.numCols)));
 				break;
+			case QUAN:
+				size = ColGroupSizes.estimateInMemorySizeQuan(fact.numRows, fact.numCols); 
+				break;
 			default:
 				throw new NotImplementedException("The col compression Type is not yet supported");
 		}
diff --git a/src/main/java/org/apache/sysds/runtime/matrix/operators/ScalarOperator.java b/src/main/java/org/apache/sysds/runtime/matrix/operators/ScalarOperator.java
index 6e0592a..a395397 100644
--- a/src/main/java/org/apache/sysds/runtime/matrix/operators/ScalarOperator.java
+++ b/src/main/java/org/apache/sysds/runtime/matrix/operators/ScalarOperator.java
@@ -23,6 +23,7 @@
 import org.apache.sysds.runtime.functionobjects.BitwShiftL;
 import org.apache.sysds.runtime.functionobjects.BitwShiftR;
 import org.apache.sysds.runtime.functionobjects.Builtin;
+import org.apache.sysds.runtime.functionobjects.Builtin.BuiltinCode;
 import org.apache.sysds.runtime.functionobjects.Equals;
 import org.apache.sysds.runtime.functionobjects.Minus;
 import org.apache.sysds.runtime.functionobjects.MinusNz;
@@ -31,7 +32,6 @@
 import org.apache.sysds.runtime.functionobjects.NotEquals;
 import org.apache.sysds.runtime.functionobjects.Power2;
 import org.apache.sysds.runtime.functionobjects.ValueFunction;
-import org.apache.sysds.runtime.functionobjects.Builtin.BuiltinCode;
 
 
 /**
diff --git a/src/test/java/org/apache/sysds/test/TestUtils.java b/src/test/java/org/apache/sysds/test/TestUtils.java
index 1e38e2c..5dd924e 100644
--- a/src/test/java/org/apache/sysds/test/TestUtils.java
+++ b/src/test/java/org/apache/sysds/test/TestUtils.java
@@ -747,6 +747,12 @@
 		}
 		assertTrue("" + countErrors + " values are not in equal", countErrors == 0);
 	}
+
+	/**
+	 * Compares two matrices cell-wise with the given epsilon, inferring the
+	 * dimensions from the expected matrix. (The outer array length is the row
+	 * count; the previous assertion messages had rows/columns swapped.)
+	 */
+	public static void compareMatrices(double[][] expectedMatrix, double[][] actualMatrix, double epsilon){
+		assertTrue("The number of rows in the matrices should be equal", expectedMatrix.length == actualMatrix.length);
+		assertTrue("The number of columns in the matrices should be equal", expectedMatrix[0].length == actualMatrix[0].length);
+		compareMatrices(expectedMatrix, actualMatrix, expectedMatrix.length, expectedMatrix[0].length, epsilon);
+	}
 	
 	public static void compareFrames(String[][] expectedFrame, String[][] actualFrame, int rows, int cols ) {
 		int countErrors = 0;
@@ -780,6 +786,14 @@
 		assertTrue("" + countErrors + " values are not in equal", countErrors == 0);
 	}
 
+	/**
+	 * Bit-distance comparison with dimensions inferred from the expected
+	 * matrix. (The outer array length is the row count; the previous assertion
+	 * messages had rows/columns swapped, and cols was inconsistently taken
+	 * from the actual matrix.)
+	 */
+	public static void compareMatricesBitAvgDistance(double[][] expectedMatrix, double[][] actualMatrix,
+			long maxUnitsOfLeastPrecision, long maxAvgDistance, String message){
+		assertTrue("The number of rows in the matrices should be equal", expectedMatrix.length == actualMatrix.length);
+		assertTrue("The number of columns in the matrices should be equal", expectedMatrix[0].length == actualMatrix[0].length);
+		compareMatricesBitAvgDistance(expectedMatrix, actualMatrix, expectedMatrix.length, expectedMatrix[0].length, 
+			maxUnitsOfLeastPrecision, maxAvgDistance, message);
+	}
+
 	public static void compareMatricesBitAvgDistance(double[][] expectedMatrix, double[][] actualMatrix, int rows, int cols,
 		long maxUnitsOfLeastPrecision, long maxAvgDistance, String message){
 		int countErrors = 0;
@@ -790,7 +804,7 @@
 				distance = compareScalarBits(expectedMatrix[i][j], actualMatrix[i][j]);
 				sumDistance += distance;
 				if(distance > maxUnitsOfLeastPrecision){
-					System.out.println(expectedMatrix[i][j] +" vs actual: "+actualMatrix[i][j]+" at "+i+" "+j);
+					System.out.println(expectedMatrix[i][j] +" vs actual: "+actualMatrix[i][j]+" at "+i+" "+j + " Distance in bits: " + distance);
 					countErrors++;
 				}
 			}
@@ -801,6 +815,70 @@
 			avgDistance <= maxAvgDistance);
 	}
 
+	/**
+	 * Computes a similarity ratio in [0,1] between two scalars (1.0 ==
+	 * identical magnitudes), with a slight cheat where values are close to 0.
+	 * @param x value 1
+	 * @param y value 2
+	 * @param ignoreZero if true, pairs whose smaller magnitude is near zero are
+	 *        treated as matching (returns 1.0)
+	 * @return ratio of the smaller to the larger magnitude; 0.0 for opposite signs
+	 */
+	private static double getPercentDistance(double x, double y, boolean ignoreZero){
+		// Values of opposite sign never match.
+		if((x < 0 && y > 0 )||(x>0 && y< 0)) return 0.0;
+		// Take abs BEFORE min/max: the previous abs(min(x,y))/abs(max(x,y))
+		// produced ratios > 1 for two negative inputs (e.g. -5,-3 gave 5/3).
+		double min = Math.min(Math.abs(x), Math.abs(y));
+		double max = Math.max(Math.abs(x), Math.abs(y));
+		if(ignoreZero && min < 0.0001){
+			return 1.0;
+		}
+		// Shift both slightly away from zero to avoid 0/x and division by zero.
+		if(min < 0.0001 || max < 0.0001){
+			min += 0.0001;
+			max += 0.0001;
+		}
+		return min / max;
+	}
+
+
+
+	/**
+	 * Percentage-similarity comparison with dimensions inferred from the
+	 * expected matrix; zeros are not ignored. (Rows/columns assertion messages
+	 * were previously swapped: the outer array length is the row count.)
+	 */
+	public static void compareMatricesPercentageDistance(double[][] expectedMatrix, double[][] actualMatrix,  
+			double percentDistanceAllowed, double maxAveragePercentDistance,  String message){
+		assertTrue("The number of rows in the matrices should be equal", expectedMatrix.length == actualMatrix.length);
+		assertTrue("The number of columns in the matrices should be equal", expectedMatrix[0].length == actualMatrix[0].length);
+		compareMatricesPercentageDistance(expectedMatrix, actualMatrix, expectedMatrix.length, expectedMatrix[0].length,
+			percentDistanceAllowed, maxAveragePercentDistance, message, false);
+	}
+
+	/**
+	 * Percentage-similarity comparison with dimensions inferred from the
+	 * expected matrix and a configurable near-zero policy. (Rows/columns
+	 * assertion messages were previously swapped.)
+	 */
+	public static void compareMatricesPercentageDistance(double[][] expectedMatrix, double[][] actualMatrix,  
+			double percentDistanceAllowed, double maxAveragePercentDistance,  String message, boolean ignoreZero){
+		assertTrue("The number of rows in the matrices should be equal", expectedMatrix.length == actualMatrix.length);
+		assertTrue("The number of columns in the matrices should be equal", expectedMatrix[0].length == actualMatrix[0].length);
+		compareMatricesPercentageDistance(expectedMatrix, actualMatrix, expectedMatrix.length, expectedMatrix[0].length,
+			percentDistanceAllowed, maxAveragePercentDistance, message, ignoreZero);
+	}
+
+	/**
+	 * Cell-wise percentage comparison. Despite the name, "distance" here is a
+	 * similarity ratio in [0,1] produced by getPercentDistance (1.0 ==
+	 * identical): a cell FAILS when its ratio drops below
+	 * percentDistanceAllowed, and the aggregate check requires the average
+	 * ratio to be strictly ABOVE maxAveragePercentDistance.
+	 */
+	public static void compareMatricesPercentageDistance(double[][] expectedMatrix, double[][] actualMatrix, int rows,
+		int cols, double percentDistanceAllowed, double maxAveragePercentDistance,  String message, boolean ignoreZero){
+			assertTrue("percentDistanceAllowed should be between 1 and 0", percentDistanceAllowed >= 0.0 && percentDistanceAllowed <= 1.0);
+			assertTrue("maxAveragePercentDistance should be between 1 and 0", maxAveragePercentDistance >= 0.0 && maxAveragePercentDistance <= 1.0);
+
+			int countErrors = 0;
+			double sumPercentDistance = 0;
+			double distance;
+
+			for (int i = 0; i < rows; i++) {
+				for (int j = 0; j < cols; j++) {
+					// Ratio of smaller to larger magnitude; 1.0 means identical.
+					distance = getPercentDistance(expectedMatrix[i][j], actualMatrix[i][j], ignoreZero);
+					sumPercentDistance += distance;
+					if(distance < percentDistanceAllowed){
+						System.out.println(expectedMatrix[i][j] +" vs actual: "+actualMatrix[i][j]+" at "+i+" "+j + " Distance in percent " + distance);
+						countErrors++;
+					}
+				}
+			}
+			double avgDistance = sumPercentDistance / (rows * cols);
+			assertTrue(message + "\n" + countErrors + " values are not in equal of total: " + (rows * cols), countErrors == 0);
+			// Average similarity must exceed the threshold (higher == better).
+			assertTrue(message + "\nThe avg distance: "+ avgDistance +" was lower than threshold " + maxAveragePercentDistance,
+				avgDistance > maxAveragePercentDistance);
+	}
+
 	public static void compareMatricesBitAvgDistance(double[][] expectedMatrix, double[][] actualMatrix, int rows,
 		int cols, long maxUnitsOfLeastPrecision, long maxAvgDistance) {
 			compareMatricesBitAvgDistance(expectedMatrix, actualMatrix, rows, cols, maxUnitsOfLeastPrecision, maxAvgDistance, "");
@@ -840,7 +918,8 @@
 
 	public static void compareScalarBitsJUnit(double d1, double d2, long maxUnitsOfLeastPrecision){
 
-		assertTrue("Given scalars do not match: " + d1 + " != " + d2 ,compareScalarBits(d1,d2,maxUnitsOfLeastPrecision));
+		long distance = compareScalarBits(d1,d2);
+		assertTrue("Given scalars do not match: " + d1 + " != " + d2 + " with bitDistance: " + distance ,distance <= maxUnitsOfLeastPrecision);
 	}
 	
 	public static void compareScalars(String expected, String actual) {
diff --git a/src/test/java/org/apache/sysds/test/component/compress/AbstractCompressedUnaryTests.java b/src/test/java/org/apache/sysds/test/component/compress/AbstractCompressedUnaryTests.java
index a6315e4..3f5d71b 100644
--- a/src/test/java/org/apache/sysds/test/component/compress/AbstractCompressedUnaryTests.java
+++ b/src/test/java/org/apache/sysds/test/component/compress/AbstractCompressedUnaryTests.java
@@ -19,6 +19,8 @@
 
 package org.apache.sysds.test.component.compress;
 
+import static org.junit.Assert.assertTrue;
+
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.sysds.runtime.compress.CompressedMatrixBlock;
 import org.apache.sysds.runtime.compress.CompressionSettings;
@@ -149,13 +151,37 @@
 			// compare result with input
 			double[][] d1 = DataConverter.convertToDoubleMatrix(ret1);
 			double[][] d2 = DataConverter.convertToDoubleMatrix(ret2);
-			int dim1 = (aggType == AggType.ROWSUMS || aggType == AggType.ROWSUMSSQ || aggType == AggType.ROWMINS ||
+			int dim1 = (aggType == AggType.ROWSUMS || aggType == AggType.ROWSUMSSQ || aggType == AggType.ROWMAXS ||
 				aggType == AggType.ROWMINS) ? rows : 1;
 			int dim2 = (aggType == AggType.COLSUMS || aggType == AggType.COLSUMSSQ || aggType == AggType.COLMAXS ||
 				aggType == AggType.COLMINS) ? cols : 1;
 
-			TestUtils.compareMatricesBitAvgDistance(d1, d2, dim1, dim2, 2048, 20, compressionSettings.toString());
+			assertTrue("dim 1 is equal in non compressed res", d1.length == dim1);
+			assertTrue("dim 1 is equal in compressed res", d2.length == dim1);
+			assertTrue("dim 2 is equal in non compressed res", d1[0].length == dim2);
+			assertTrue("dim 2 is equal in compressed res", d2[0].length == dim2);
 
+			if(compressionSettings.lossy) {
+				if(aggType == AggType.COLSUMS) {
+					TestUtils.compareMatrices(d1, d2, lossyTolerance * 30 * dim2);
+				}
+				else 
+				if(aggType == AggType.ROWSUMS) {
+					TestUtils.compareMatrices(d1, d2, lossyTolerance * 16 * dim1);
+				}
+				else {
+					boolean ignoreZero = true;
+					TestUtils.compareMatricesPercentageDistance(d1,
+						d2,
+						0.1,
+						0.9,
+						compressionSettings.toString(),
+						ignoreZero);
+				}
+			}
+			else {
+				TestUtils.compareMatricesBitAvgDistance(d1, d2, 2048, 20, compressionSettings.toString());
+			}
 		}
 		catch(Exception e) {
 			e.printStackTrace();
diff --git a/src/test/java/org/apache/sysds/test/component/compress/CompressedMatrixTest.java b/src/test/java/org/apache/sysds/test/component/compress/CompressedMatrixTest.java
index 1ee405b..7acf790 100644
--- a/src/test/java/org/apache/sysds/test/component/compress/CompressedMatrixTest.java
+++ b/src/test/java/org/apache/sysds/test/component/compress/CompressedMatrixTest.java
@@ -69,8 +69,12 @@
 				return; // Input was not compressed then just pass test
 				// Assert.assertTrue("Compression Failed \n" + this.toString(), false);
 			}
-
-			TestUtils.compareMatricesBitAvgDistance(input, deCompressed, rows, cols, 0, 0);
+			if(compressionSettings.lossy) {
+				TestUtils.compareMatrices(input, deCompressed, lossyTolerance);
+			}
+			else {
+				TestUtils.compareMatricesBitAvgDistance(input, deCompressed, 0, 0, compressionSettings.toString());
+			}
 		}
 		catch(Exception e) {
 			e.printStackTrace();
@@ -88,7 +92,12 @@
 				for(int j = 0; j < cols; j++) {
 					double ulaVal = input[i][j];
 					double claVal = cmb.getValue(i, j); // calls quickGetValue internally
-					TestUtils.compareScalarBitsJUnit(ulaVal, claVal, 0); // Should be exactly same value
+					if(compressionSettings.lossy) {
+						TestUtils.compareCellValue(ulaVal, claVal, lossyTolerance, false);
+					}
+					else {
+						TestUtils.compareScalarBitsJUnit(ulaVal, claVal, 0); // Should be exactly same value
+					}
 				}
 		}
 		catch(Exception e) {
@@ -117,7 +126,12 @@
 			// compare result with input
 			double[][] d1 = DataConverter.convertToDoubleMatrix(ret1);
 			double[][] d2 = DataConverter.convertToDoubleMatrix(ret2);
-			TestUtils.compareMatricesBitAvgDistance(d1, d2, rows, cols + 1, 0, 1);
+			if(compressionSettings.lossy) {
+				TestUtils.compareMatrices(d1, d2, lossyTolerance);
+			}
+			else {
+				TestUtils.compareMatricesBitAvgDistance(d1, d2, 0, 1, "Test Append Matrix");
+			}
 		}
 		catch(Exception e) {
 			e.printStackTrace();
@@ -132,7 +146,7 @@
 				return; // Input was not compressed then just pass test
 
 			MatrixBlock vector1 = DataConverter
-				.convertToMatrixBlock(TestUtils.generateTestMatrix(cols, 1, 0, 1, 1.0, 3));
+				.convertToMatrixBlock(TestUtils.generateTestMatrix(cols, 1, 0.5, 1.5, 1.0, 3));
 
 			// ChainType ctype = ChainType.XtwXv;
 			// Linear regression .
@@ -141,7 +155,7 @@
 			}) {
 
 				MatrixBlock vector2 = (ctype == ChainType.XtwXv) ? DataConverter
-					.convertToMatrixBlock(TestUtils.generateTestMatrix(rows, 1, 0, 1, 1.0, 3)) : null;
+					.convertToMatrixBlock(TestUtils.generateTestMatrix(rows, 1, 0.5, 1.5, 1.0, 3)) : null;
 
 				// matrix-vector uncompressed
 				MatrixBlock ret1 = mb.chainMatrixMultOperations(vector1, vector2, new MatrixBlock(), ctype);
@@ -152,7 +166,19 @@
 				// compare result with input
 				double[][] d1 = DataConverter.convertToDoubleMatrix(ret1);
 				double[][] d2 = DataConverter.convertToDoubleMatrix(ret2);
-				TestUtils.compareMatricesBitAvgDistance(d1, d2, cols, 1, 512, 32);
+
+				if(compressionSettings.lossy) {
+					// TODO Make actual calculation to know the tolerance
+					// double scaledTolerance = lossyTolerance * d1.length * d1.length * 1.5;
+					// if(ctype == ChainType.XtwXv){
+					// scaledTolerance *= d1.length * d1.length * 0.5;
+					// }
+					// TestUtils.compareMatrices(d1, d2, d1.length, d1[0].length, scaledTolerance );
+					TestUtils.compareMatricesPercentageDistance(d1, d2, 0.95, 0.95, compressionSettings.toString());
+				}
+				else {
+					TestUtils.compareMatricesBitAvgDistance(d1, d2, 512, 32, compressionSettings.toString());
+				}
 			}
 		}
 		catch(Exception e) {
@@ -179,7 +205,15 @@
 				// compare result with input
 				double[][] d1 = DataConverter.convertToDoubleMatrix(ret1);
 				double[][] d2 = DataConverter.convertToDoubleMatrix(ret2);
-				TestUtils.compareMatricesBitAvgDistance(d1, d2, cols, cols, 2048, 20);
+				if(compressionSettings.lossy) {
+					/**
+					 * Probably one of the worst things you can do to increase the amount by which the values are estimated wrong
+					 */
+					TestUtils.compareMatricesPercentageDistance(d1, d2, 0.0, 0.8, compressionSettings.toString());
+				}
+				else {
+					TestUtils.compareMatricesBitAvgDistance(d1, d2, 2048, 20, compressionSettings.toString());
+				}
 			}
 		}
 		catch(Exception e) {
@@ -189,13 +223,32 @@
 	}
 
 	@Test
-	public void testMatrixVectorMult() {
+	public void testMatrixVectorMult01() {
+		testMatrixVectorMult(1.0, 1.1);
+	}
+
+	@Test
+	public void testMatrixVectorMult02() {
+		testMatrixVectorMult(0.7, 1.0);
+	}
+
+	@Test
+	public void testMatrixVectorMult03() {
+		testMatrixVectorMult(-1.0, 1.0);
+	}
+
+	@Test
+	public void testMatrixVectorMult04() {
+		testMatrixVectorMult(1.0, 5.0);
+	}
+
+	public void testMatrixVectorMult(double min, double max) {
 		try {
 			if(!(cmb instanceof CompressedMatrixBlock))
 				return; // Input was not compressed then just pass test
 
 			MatrixBlock vector = DataConverter
-				.convertToMatrixBlock(TestUtils.generateTestMatrix(cols, 1, 1, 1, 1.0, 3));
+				.convertToMatrixBlock(TestUtils.generateTestMatrix(cols, 1, min, max, 1.0, 3));
 
 			// Make Operator
 			AggregateOperator aop = new AggregateOperator(0, Plus.getPlusFnObject());
@@ -210,7 +263,15 @@
 			// compare result with input
 			double[][] d1 = DataConverter.convertToDoubleMatrix(ret1);
 			double[][] d2 = DataConverter.convertToDoubleMatrix(ret2);
-			TestUtils.compareMatricesBitAvgDistance(d1, d2, rows, 1, 1024, 1);
+
+			if(compressionSettings.lossy) {
+				// TODO Make actual calculation to know the actual tolerance
+				double scaledTolerance = lossyTolerance * 30 * max;
+				TestUtils.compareMatrices(d1, d2, scaledTolerance);
+			}
+			else {
+				TestUtils.compareMatricesBitAvgDistance(d1, d2, 2048, 5, compressionSettings.toString());
+			}
 		}
 		catch(Exception e) {
 			e.printStackTrace();
@@ -225,7 +286,7 @@
 				return; // Input was not compressed then just pass test
 
 			MatrixBlock vector = DataConverter
-				.convertToMatrixBlock(TestUtils.generateTestMatrix(1, rows, 1, 1, 1.0, 3));
+				.convertToMatrixBlock(TestUtils.generateTestMatrix(1, rows, 0.5, 1.5, 1.0, 3));
 
 			// Make Operator
 			AggregateOperator aop = new AggregateOperator(0, Plus.getPlusFnObject());
@@ -240,7 +301,12 @@
 			// compare result with input
 			double[][] d1 = DataConverter.convertToDoubleMatrix(ret1);
 			double[][] d2 = DataConverter.convertToDoubleMatrix(ret2);
-			TestUtils.compareMatricesBitAvgDistance(d1, d2, 1, cols, 10000, 500);
+			if(compressionSettings.lossy) {
+				TestUtils.compareMatricesPercentageDistance(d1, d2, 0.60, 0.97, compressionSettings.toString());
+			}
+			else {
+				TestUtils.compareMatricesBitAvgDistance(d1, d2, 10000, 500, compressionSettings.toString());
+			}
 		}
 		catch(Exception e) {
 			e.printStackTrace();
@@ -254,8 +320,9 @@
 			if(!(cmb instanceof CompressedMatrixBlock))
 				return; // Input was not compressed then just pass test
 
+			double addValue = 1000;
 			// matrix-scalar uncompressed
-			ScalarOperator sop = new RightScalarOperator(Plus.getPlusFnObject(), 7);
+			ScalarOperator sop = new RightScalarOperator(Plus.getPlusFnObject(), addValue);
 			MatrixBlock ret1 = mb.scalarOperations(sop, new MatrixBlock());
 
 			// matrix-scalar compressed
@@ -267,7 +334,14 @@
 			double[][] d1 = DataConverter.convertToDoubleMatrix(ret1);
 			double[][] d2 = DataConverter.convertToDoubleMatrix(ret2);
 
-			TestUtils.compareMatricesBitAvgDistance(d1, d2, rows, cols, 150, 1);
+			if(compressionSettings.lossy) {
+				double modifiedTolerance = Math.max(TestConstants.getMaxRangeValue(valRange) + addValue,
+					Math.abs(TestConstants.getMinRangeValue(valRange) + addValue)) * 2 / 127.0;
+				TestUtils.compareMatrices(d1, d2, modifiedTolerance);
+			}
+			else {
+				TestUtils.compareMatricesBitAvgDistance(d1, d2, 150, 1, compressionSettings.toString());
+			}
 		}
 		catch(Exception e) {
 			e.printStackTrace();
@@ -293,8 +367,13 @@
 			// compare result with input
 			double[][] d1 = DataConverter.convertToDoubleMatrix(ret1);
 			double[][] d2 = DataConverter.convertToDoubleMatrix(ret2);
-
-			TestUtils.compareMatricesBitAvgDistance(d1, d2, rows, cols, 150, 1);
+			if(compressionSettings.lossy) {
+				double modifiedTolerance = lossyTolerance * 7;
+				TestUtils.compareMatrices(d1, d2, modifiedTolerance);
+			}
+			else {
+				TestUtils.compareMatricesBitAvgDistance(d1, d2, 150, 1, compressionSettings.toString());
+			}
 		}
 		catch(Exception e) {
 			e.printStackTrace();
@@ -331,8 +410,12 @@
 			// compare result with input
 			double[][] d1 = DataConverter.convertToDoubleMatrix(mb);
 			double[][] d2 = DataConverter.convertToDoubleMatrix(tmp);
-
-			TestUtils.compareMatricesBitAvgDistance(d1, d2, rows, cols, 0, 0);
+			if(compressionSettings.lossy) {
+				TestUtils.compareMatrices(d1, d2, lossyTolerance);
+			}
+			else {
+				TestUtils.compareMatricesBitAvgDistance(d1, d2, 0, 0, compressionSettings.toString());
+			}
 		}
 		catch(Exception e) {
 			e.printStackTrace();
@@ -407,10 +490,11 @@
 			builder.append("\n\tcol groups sizes: " + cStat.getGroupsSizesString());
 			builder.append("\n\t" + this.toString());
 
-			//NOTE: The Jol estimate is wrong for shared dictionaries because
-			//      it treats the object hierarchy as a tree and not a graph
-			assertTrue(builder.toString(), actualSize <= originalSize 
-				&& (compressionSettings.allowSharedDDCDictionary || actualSize == JolEstimatedSize));
+			// NOTE: The Jol estimate is wrong for shared dictionaries because
+			// it treats the object hierarchy as a tree and not a graph
+			assertTrue(builder.toString(),
+				actualSize <= originalSize &&
+					(compressionSettings.allowSharedDDCDictionary || actualSize == JolEstimatedSize));
 		}
 		catch(Exception e) {
 			e.printStackTrace();
@@ -420,7 +504,8 @@
 
 	@Test
 	public void testCompressionScale() {
-		// This test is here for a sanity check such that we verify that the compression ratio from our Matrix
+		// This test is here for a sanity check such that we verify that the compression
+		// ratio from our Matrix
 		// Compressed Block is not unreasonably good.
 		try {
 			if(!(cmb instanceof CompressedMatrixBlock))
diff --git a/src/test/java/org/apache/sysds/test/component/compress/CompressedTestBase.java b/src/test/java/org/apache/sysds/test/component/compress/CompressedTestBase.java
index c0668c6..990b83b 100644
--- a/src/test/java/org/apache/sysds/test/component/compress/CompressedTestBase.java
+++ b/src/test/java/org/apache/sysds/test/component/compress/CompressedTestBase.java
@@ -41,46 +41,57 @@
 public class CompressedTestBase extends TestBase {
 
 	protected static SparsityType[] usedSparsityTypes = new SparsityType[] { // Sparsity 0.9, 0.1, 0.01 and 0.0
-		// SparsityType.DENSE,
-		SparsityType.SPARSE, SparsityType.ULTRA_SPARSE, SparsityType.EMPTY
+		SparsityType.DENSE,
+		// SparsityType.SPARSE,
+		// SparsityType.ULTRA_SPARSE,
+		// SparsityType.EMPTY
 	};
 	protected static ValueType[] usedValueTypes = new ValueType[] {
-		// ValueType.RAND,
-		ValueType.CONST, ValueType.RAND_ROUND, ValueType.OLE_COMPRESSIBLE, ValueType.RLE_COMPRESSIBLE,};
+		ValueType.RAND, 
+		ValueType.CONST,
+		ValueType.RAND_ROUND, 
+		ValueType.OLE_COMPRESSIBLE, 
+		ValueType.RLE_COMPRESSIBLE,
+	};
 
-	protected static ValueRange[] usedValueRanges = new ValueRange[] {ValueRange.SMALL,
-		// ValueRange.LARGE,
+	protected static ValueRange[] usedValueRanges = new ValueRange[] {
+		// ValueRange.SMALL,
+		ValueRange.LARGE,
 	};
 
 	private static List<CompressionType> DDCOnly = new ArrayList<>();
 	private static List<CompressionType> OLEOnly = new ArrayList<>();
 	private static List<CompressionType> RLEOnly = new ArrayList<>();
+	private static List<CompressionType> QuanOnly = new ArrayList<>();
 
 	static {
 		DDCOnly.add(CompressionType.DDC);
 		OLEOnly.add(CompressionType.OLE);
 		RLEOnly.add(CompressionType.RLE);
+		QuanOnly.add(CompressionType.QUAN);
 	}
 
 	private static final int compressionSeed = 7;
 
 	protected static CompressionSettings[] usedCompressionSettings = new CompressionSettings[] {
-		new CompressionSettingsBuilder().setSamplingRatio(0.1).setAllowSharedDDCDictionary(false)
-			.setSeed(compressionSeed).setValidCompressions(DDCOnly).setInvestigateEstimate(true).create(),
-		new CompressionSettingsBuilder().setSamplingRatio(0.1).setAllowSharedDDCDictionary(true)
-			.setSeed(compressionSeed).setValidCompressions(DDCOnly).setInvestigateEstimate(true).create(),
-		new CompressionSettingsBuilder().setSamplingRatio(0.1).setSeed(compressionSeed).setValidCompressions(OLEOnly)
-			.setInvestigateEstimate(true).create(),
-		new CompressionSettingsBuilder().setSamplingRatio(0.1).setSeed(compressionSeed).setValidCompressions(RLEOnly)
-			.setInvestigateEstimate(true).create(),
+		// new CompressionSettingsBuilder().setSamplingRatio(0.1).setAllowSharedDDCDictionary(false)
+		// 	.setSeed(compressionSeed).setValidCompressions(DDCOnly).setInvestigateEstimate(true).create(),
+		// new CompressionSettingsBuilder().setSamplingRatio(0.1).setAllowSharedDDCDictionary(true)
+		// 	.setSeed(compressionSeed).setValidCompressions(DDCOnly).setInvestigateEstimate(true).create(),
+		// new CompressionSettingsBuilder().setSamplingRatio(0.1).setSeed(compressionSeed).setValidCompressions(OLEOnly)
+		// 	.setInvestigateEstimate(true).create(),
+		// new CompressionSettingsBuilder().setSamplingRatio(0.1).setSeed(compressionSeed).setValidCompressions(RLEOnly)
+		// 	.setInvestigateEstimate(true).create(),
 		new CompressionSettingsBuilder().setSamplingRatio(1.0).setSeed(compressionSeed).setInvestigateEstimate(true)
-			.create()};
+			.create(),
+		new CompressionSettingsBuilder().setSamplingRatio(1.0).setSeed(compressionSeed).setValidCompressions(QuanOnly)
+			.setInvestigateEstimate(true).create()
+		};
 
 	protected static MatrixTypology[] usedMatrixTypology = new MatrixTypology[] { // Selected Matrix Types
-		MatrixTypology.SMALL,
-		// MatrixTypology.FEW_COL,
-		MatrixTypology.FEW_ROW,
-		// MatrixTypology.LARGE,
+		MatrixTypology.SMALL, MatrixTypology.FEW_COL,
+		// MatrixTypology.FEW_ROW,
+		MatrixTypology.LARGE,
 		// MatrixTypology.SINGLE_COL,
 		// MatrixTypology.SINGLE_ROW,
 		MatrixTypology.L_ROWS,
@@ -99,12 +110,15 @@
 
 	protected int sampleTolerance = 1024;
 
+	protected double lossyTolerance;
+
 	public CompressedTestBase(SparsityType sparType, ValueType valType, ValueRange valueRange,
 		CompressionSettings compSettings, MatrixTypology MatrixTypology) {
 		super(sparType, valType, valueRange, compSettings, MatrixTypology);
-		// System.out.println("HERE !");
-		try {
 
+		try {
+			if(compSettings.lossy)
+				setLossyTolerance(valueRange);
 			cmb = CompressedMatrixBlockFactory.compress(mb, k, compressionSettings);
 
 			if(cmb instanceof CompressedMatrixBlock) {
@@ -122,13 +136,21 @@
 		}
 		catch(Exception e) {
 			e.printStackTrace();
-			// throw new RuntimeException(
-			// "CompressionTest Init failed with settings: " + this.toString() + "\n" + e.getMessage(), e);
 			assertTrue("\nCompressionTest Init failed with settings: " + this.toString(), false);
 		}
 
 	}
 
+	private void setLossyTolerance(ValueRange valueRange) {
+		/**
+		 * Tolerance for encoding values is the maximum value in the dataset divided by the number of distinct values
+		 * available in a single Byte (since we encode our quantization in a Byte)
+		 */
+		lossyTolerance = (double) Math.max(TestConstants.getMaxRangeValue(valueRange),
+			Math.abs(TestConstants.getMinRangeValue(valueRange))) / 127.0;
+
+	}
+
 	@Parameters
 	public static Collection<Object[]> data() {
 		ArrayList<Object[]> tests = new ArrayList<>();
@@ -138,7 +160,7 @@
 				for(ValueRange vr : usedValueRanges) {
 					for(CompressionSettings cs : usedCompressionSettings) {
 						for(MatrixTypology mt : usedMatrixTypology) {
-							tests.add(new Object[] {st, vt, vr, cs, mt,});
+							tests.add(new Object[] {st, vt, vr, cs, mt});
 
 						}
 					}
diff --git a/src/test/java/org/apache/sysds/test/component/compress/CompressedVectorTest.java b/src/test/java/org/apache/sysds/test/component/compress/CompressedVectorTest.java
index 451a234..2607b92 100644
--- a/src/test/java/org/apache/sysds/test/component/compress/CompressedVectorTest.java
+++ b/src/test/java/org/apache/sysds/test/component/compress/CompressedVectorTest.java
@@ -83,11 +83,15 @@
 
 			// quantile compressed
 			double ret2 = cmb.cmOperations(cm).getRequiredResult(opType);
-			// compare result with input allowing 1 bit difference in least significant location
-			TestUtils.compareScalarBitsJUnit(ret1, ret2, 64);
 
+			if (compressionSettings.lossy) {
+				TestUtils.compareCellValue(ret1, ret2, lossyTolerance, false);
+			} else {
+				TestUtils.compareScalarBitsJUnit(ret1, ret2, 64);
+			}
 		}
 		catch(Exception e) {
+			e.printStackTrace();
 			throw new Exception(this.toString() + "\n" + e.getMessage(), e);
 		}
 	}
@@ -103,10 +107,14 @@
 			MatrixBlock tmp2 = cmb.sortOperations(null, new MatrixBlock());
 			double ret2 = tmp2.pickValue(0.95);
 
-			// compare result with input
-			TestUtils.compareScalarBitsJUnit(ret1, ret2, 64);
+			if (compressionSettings.lossy) {
+				TestUtils.compareCellValue(ret1, ret2, lossyTolerance, false);
+			} else {
+				TestUtils.compareScalarBitsJUnit(ret1, ret2, 64);
+			}
 		}
 		catch(Exception e) {
+			e.printStackTrace();
 			throw new RuntimeException(this.toString() + "\n" + e.getMessage(), e);
 		}
 	}
diff --git a/src/test/java/org/apache/sysds/test/component/compress/CompressibleInputGenerator.java b/src/test/java/org/apache/sysds/test/component/compress/CompressibleInputGenerator.java
index 443c828..54c9414 100644
--- a/src/test/java/org/apache/sysds/test/component/compress/CompressibleInputGenerator.java
+++ b/src/test/java/org/apache/sysds/test/component/compress/CompressibleInputGenerator.java
@@ -20,7 +20,9 @@
 package org.apache.sysds.test.component.compress;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
+import java.util.DoubleSummaryStatistics;
 import java.util.List;
 import java.util.Random;
 
@@ -30,43 +32,59 @@
 import org.apache.sysds.runtime.util.DataConverter;
 
 /**
- * WARNING, this compressible input generator generates transposed inputs, (rows and cols are switched) this is because
- * then the test does not need to transpose the input for the colGroups that expect transposed inputs.
+ * WARNING, this compressible input generator generates transposed inputs, (rows
+ * and cols are switched) this is because then the test does not need to
+ * transpose the input for the colGroups that expect transposed inputs.
  * 
  */
 public class CompressibleInputGenerator {
 
-	public static MatrixBlock getInput(int rows, int cols, CompressionType ct, int nrUnique, double sparsity,
-		int seed) {
-		double[][] output = getInputDoubleMatrix(rows, cols, ct, nrUnique, sparsity, seed, false);
+	public static MatrixBlock getInput(int rows, int cols, CompressionType ct, int nrUnique, 
+			double sparsity, int seed) {
+		double[][] output = getInputDoubleMatrix(rows, cols, ct, nrUnique, 1000000, -1000000, sparsity, seed, false);
 		return DataConverter.convertToMatrixBlock(output);
 	}
 
-	public static double[][] getInputDoubleMatrix(int rows, int cols, CompressionType ct, int nrUnique, double sparsity,
-		int seed, boolean transpose) {
+	public static MatrixBlock getInput(int rows, int cols, CompressionType ct, int nrUnique, int max, int min,
+			double sparsity, int seed) {
+		double[][] output = getInputDoubleMatrix(rows, cols, ct, nrUnique, max, min, sparsity, seed, false);
+		return DataConverter.convertToMatrixBlock(output);
+	}
+
+	public static double[][] getInputDoubleMatrix(int rows, int cols, CompressionType ct, int nrUnique, int max,
+			int min, double sparsity, int seed, boolean transpose) {
 		double[][] output;
-		switch(ct) {
+		switch (ct) {
 			case RLE:
-				output = rle(rows, cols, nrUnique, sparsity, seed, transpose);
+				output = rle(rows, cols, nrUnique, max, min, sparsity, seed, transpose);
 				break;
 			case OLE:
-				output = ole(rows, cols, nrUnique, sparsity, seed, transpose);
+				output = ole(rows, cols, nrUnique, max, min, sparsity, seed, transpose);
 				break;
 			default:
 				throw new NotImplementedException("Not implemented generator.");
 		}
-
+		for(double[] x : output){
+			DoubleSummaryStatistics dss =  Arrays.stream(x).summaryStatistics();
+			if(dss.getMax() > max) {
+				throw new RuntimeException("Incorrect matrix generated "+ct+", max too high, was: " + dss.getMax() + " should be: " + max);
+			}
+			if(dss.getMin() < min) {
+				throw new RuntimeException("Incorrect matrix generated "+ct+", min too low, was: " + dss.getMin() + " should be: " + min);
+			}
+		}
 		return output;
 	}
 
-	private static double[][] rle(int rows, int cols, int nrUnique, double sparsity, int seed, boolean transpose) {
+	private static double[][] rle(int rows, int cols, int nrUnique, int max, int min, double sparsity, int seed,
+			boolean transpose) {
 
 		Random r = new Random(seed);
-		List<Double> values = getNRandomValues(nrUnique, r);
+		List<Double> values = getNRandomValues(nrUnique, r, max, min);
 
-		double[][] matrix = transpose ? new double[rows][cols]: new double[cols][rows];
+		double[][] matrix = transpose ? new double[rows][cols] : new double[cols][rows];
 
-		for(int colNr = 0; colNr < cols; colNr++) {
+		for (int colNr = 0; colNr < cols; colNr++) {
 			Collections.shuffle(values, r);
 
 			// Generate a Dirichlet distribution, to distribute the values
@@ -76,26 +94,26 @@
 
 			int pointer = 0;
 			int valuePointer = 0;
-			for(int nr : occurences) {
+			for (int nr : occurences) {
 				int zeros = (int) (Math.floor(nr * (1.0 - sparsity)));
 				int before = (zeros > 0) ? r.nextInt(zeros) : 0;
 				int after = zeros - before;
 				pointer += before;
-				for(int i = before; i < nr - after; i++) {
-					if(transpose){
+				for (int i = before; i < nr - after; i++) {
+					if (transpose) {
 						matrix[pointer][colNr] = values.get(valuePointer);
-					}else{
+					} else {
 						matrix[colNr][pointer] = values.get(valuePointer);
 					}
 					pointer++;
 				}
 				pointer += after;
 				valuePointer++;
-				if(valuePointer == values.size() && after == 0) {
-					while(pointer < rows) {
-						if(transpose){
+				if (valuePointer == values.size() && after == 0) {
+					while (pointer < rows) {
+						if (transpose) {
 							matrix[pointer][colNr] = values.get(nrUnique - 1);
-						}else{
+						} else {
 							matrix[colNr][pointer] = values.get(nrUnique - 1);
 						}
 						pointer++;
@@ -106,67 +124,78 @@
 		return matrix;
 	}
 
-	// Note ole compress the best if there are multiple correlated columns.
-	// Therefore the multiple columns are needed for good compressions.
-	// Also Nr Unique is only associated to a specific column in this compression, so the number of
-	// uniques are only in a single column, making actual the nrUnique (cols * nrUnique)
-	// Does not guaranty that all the nr uniques are in use, since the values are randomly selected.
-	private static double[][] ole(int rows, int cols, int nrUnique, double sparsity, int seed, boolean transpose) {
+	/**
+	 * Note ole compress the best if there are multiple correlated columns.
+	 * Therefore the multiple columns are needed for good compressions. Also Nr
+	 * Unique is only associated to a specific column in this compression, so the
+	 * number of uniques is only in a single column, making the actual nrUnique
+	 * (cols * nrUnique). Does not guarantee that all the nr uniques are in use, since
+	 * the values are randomly selected.
+	 * 
+	 * @param rows      Number of rows in generated output
+	 * @param cols      Number of cols in generated output
+	 * @param nrUnique  Number of unique values in generated output, Note this means
+	 *                  base unique in this case, and this number will grow
+	 *                  according to sparsity as well.
+	 * @param max       The Maximum Value contained
+	 * @param min       The Minimum value contained
+	 * @param sparsity  The sparsity of the generated matrix
+	 * @param seed      The seed of the generated matrix
+	 * @param transpose If the output should be a transposed matrix or not
+	 * @return Generated nicely compressible OLE col Group.
+	 */
+	private static double[][] ole(int rows, int cols, int nrUnique, int max, int min, double sparsity, int seed,
+			boolean transpose) {
 		// chose some random values
 		Random r = new Random(seed);
-		List<Double> values = getNRandomValues(nrUnique, r);
-		double[][] matrix = transpose ? new double[rows][cols]: new double[cols][rows];
+		List<Double> values = getNRandomValues(nrUnique, r, max, min);
+		double[][] matrix = transpose ? new double[rows][cols] : new double[cols][rows];
 
 		// Generate the first column.
-		// double[] col1 = new double[rows];
-		// matrix[0] = col1;
-		for(int x = 0; x < rows; x++) {
-			if(r.nextDouble() < sparsity) {
-				if(transpose){
+		for (int x = 0; x < rows; x++) {
+			if (r.nextDouble() < sparsity) {
+				if (transpose) {
 					matrix[x][0] = values.get(r.nextInt(nrUnique));
-				}else{
+				} else {
 					matrix[0][x] = values.get(r.nextInt(nrUnique));
 				}
 			}
 		}
 
-		// System.out.println(Arrays.toString(matrix[0]));
-
-		for(int y = 1; y < cols; y++) {
-			for(int x = 0; x < rows; x++) {
-				if(r.nextDouble() < sparsity) {
-					if(transpose){
-						matrix[x][y] = Double.longBitsToDouble(Double.doubleToLongBits(matrix[x][0] + y) << 32L);
-					}else{
-						matrix[y][x] = Double.longBitsToDouble(Double.doubleToLongBits(matrix[0][x] + y) << 32L);
+		for (int y = 1; y < cols; y++) {
+			for (int x = 0; x < rows; x++) {
+				if (r.nextDouble() < sparsity) {
+					if (transpose) {
+						matrix[x][y] = matrix[x][0];
+					} else {
+						matrix[y][x] = matrix[0][x];
 					}
 				}
 			}
 		}
-		// System.out.println(Arrays.toString(matrix[1]));
-
 		return matrix;
 	}
 
 	private static int[] makeDirichletDistribution(int nrUnique, int rows, Random r) {
 		double[] distribution = new double[nrUnique];
 		double sum = 0;
-		for(int i = 0; i < nrUnique; i++) {
+		for (int i = 0; i < nrUnique; i++) {
 			distribution[i] = r.nextDouble();
 			sum += distribution[i];
 		}
 
 		int[] occurences = new int[nrUnique];
-		for(int i = 0; i < nrUnique; i++) {
+		for (int i = 0; i < nrUnique; i++) {
 			occurences[i] = (int) (((double) distribution[i] / (double) sum) * (double) rows);
 		}
 		return occurences;
 	}
 
-	private static List<Double> getNRandomValues(int nrUnique, Random r) {
+	private static List<Double> getNRandomValues(int nrUnique, Random r, int max, int min) {
 		List<Double> values = new ArrayList<>();
-		for(int i = 0; i < nrUnique; i++) {
-			values.add((double)Math.round(r.nextDouble()* 1000.0));
+		for (int i = 0; i < nrUnique; i++) {
+			double v = (r.nextDouble() * (double)(max - min)) + (double)min;
+			values.add( Math.floor(v));
 		}
 		return values;
 	}
diff --git a/src/test/java/org/apache/sysds/test/component/compress/ParCompressedMatrixTest.java b/src/test/java/org/apache/sysds/test/component/compress/ParCompressedMatrixTest.java
index e86c269..8ed8f01 100644
--- a/src/test/java/org/apache/sysds/test/component/compress/ParCompressedMatrixTest.java
+++ b/src/test/java/org/apache/sysds/test/component/compress/ParCompressedMatrixTest.java
@@ -56,8 +56,12 @@
 				return; // Input was not compressed then just pass test
 				// Assert.assertTrue("Compression Failed \n" + this.toString(), false);
 			}
-
-			TestUtils.compareMatricesBitAvgDistance(input, deCompressed, rows, cols, 0, 0);
+			if(compressionSettings.lossy) {
+				TestUtils.compareMatrices(input, deCompressed, lossyTolerance);
+			}
+			else {
+				TestUtils.compareMatricesBitAvgDistance(input, deCompressed, rows, cols, 0, 0);
+			}
 		}
 		catch(Exception e) {
 			e.printStackTrace();
@@ -76,7 +80,12 @@
 				for(int j = 0; j < cols; j++) {
 					double ulaVal = input[i][j];
 					double claVal = cmb.getValue(i, j); // calls quickGetValue internally
-					TestUtils.compareScalarBitsJUnit(ulaVal, claVal, 0); // Should be exactly same value
+					if(compressionSettings.lossy) {
+						TestUtils.compareCellValue(ulaVal, claVal, lossyTolerance, false);
+					}
+					else {
+						TestUtils.compareScalarBitsJUnit(ulaVal, claVal, 0); // Should be exactly same value
+					}
 				}
 		}
 		catch(Exception e) {
@@ -92,7 +101,7 @@
 				return; // Input was not compressed then just pass test
 
 			MatrixBlock vector1 = DataConverter
-				.convertToMatrixBlock(TestUtils.generateTestMatrix(cols, 1, 0, 1, 1.0, 3));
+				.convertToMatrixBlock(TestUtils.generateTestMatrix(cols, 1, 0.5, 1.5, 1.0, 3));
 
 			// ChainType ctype = ChainType.XtwXv;
 			for(ChainType ctype : new ChainType[] {ChainType.XtwXv, ChainType.XtXv,
@@ -100,7 +109,7 @@
 			}) {
 
 				MatrixBlock vector2 = (ctype == ChainType.XtwXv) ? DataConverter
-					.convertToMatrixBlock(TestUtils.generateTestMatrix(rows, 1, 0, 1, 1.0, 3)) : null;
+					.convertToMatrixBlock(TestUtils.generateTestMatrix(rows, 1, 0.5, 1.5, 1.0, 3)) : null;
 
 				// matrix-vector uncompressed
 				MatrixBlock ret1 = mb.chainMatrixMultOperations(vector1, vector2, new MatrixBlock(), ctype, k);
@@ -111,7 +120,12 @@
 				// compare result with input
 				double[][] d1 = DataConverter.convertToDoubleMatrix(ret1);
 				double[][] d2 = DataConverter.convertToDoubleMatrix(ret2);
-				TestUtils.compareMatricesBitAvgDistance(d1, d2, cols, 1, 2048, 32);
+				if(compressionSettings.lossy) {
+					TestUtils.compareMatricesPercentageDistance(d1, d2, 0.92, 0.95, compressionSettings.toString());
+				}
+				else {
+					TestUtils.compareMatricesBitAvgDistance(d1, d2, 2048, 32, compressionSettings.toString());
+				}
 			}
 		}
 		catch(Exception e) {
@@ -139,7 +153,15 @@
 				double[][] d1 = DataConverter.convertToDoubleMatrix(ret1);
 				double[][] d2 = DataConverter.convertToDoubleMatrix(ret2);
 				// High probability that The value is off by some amount
-				TestUtils.compareMatricesBitAvgDistance(d1, d2, cols, cols, 2048, 20);
+				if(compressionSettings.lossy) {
+					/**
+					 * Transpose-self multiplication compounds the quantization error, so lossy results deviate the most here.
+					 */
+					TestUtils.compareMatricesPercentageDistance(d1, d2, 0.0, 0.8, compressionSettings.toString());
+				}
+				else {
+					TestUtils.compareMatricesBitAvgDistance(d1, d2, 2048, 20, compressionSettings.toString());
+				}
 			}
 		}
 		catch(Exception e) {
@@ -149,13 +171,27 @@
 	}
 
 	@Test
-	public void testMatrixVectorMult() {
+	public void testMatrixVectorMult02() {
+		testMatrixVectorMult(0.7, 1.0);
+	}
+
+	@Test
+	public void testMatrixVectorMult03() {
+		testMatrixVectorMult(-1.0, 1.0);
+	}
+
+	@Test
+	public void testMatrixVectorMult04() {
+		testMatrixVectorMult(1.0, 5.0);
+	}
+
+	public void testMatrixVectorMult(double min, double max) {
 		try {
 			if(!(cmb instanceof CompressedMatrixBlock))
 				return; // Input was not compressed then just pass test
 
 			MatrixBlock vector = DataConverter
-				.convertToMatrixBlock(TestUtils.generateTestMatrix(cols, 1, 1, 1, 1.0, 3));
+				.convertToMatrixBlock(TestUtils.generateTestMatrix(cols, 1, min, max, 1.0, 3));
 
 			// matrix-vector uncompressed
 			AggregateBinaryOperator abop = InstructionUtils.getMatMultOperator(k);
@@ -167,7 +203,14 @@
 			// compare result with input
 			double[][] d1 = DataConverter.convertToDoubleMatrix(ret1);
 			double[][] d2 = DataConverter.convertToDoubleMatrix(ret2);
-			TestUtils.compareMatricesBitAvgDistance(d1, d2, rows, 1, 256, 1);
+			if(compressionSettings.lossy) {
+				// TODO Derive the exact tolerance analytically instead of this heuristic scaling factor.
+				double scaledTolerance = lossyTolerance * 30 * max;
+				TestUtils.compareMatrices(d1, d2, scaledTolerance);
+			}
+			else {
+				TestUtils.compareMatricesBitAvgDistance(d1, d2, 2048, 5, compressionSettings.toString());
+			}
 		}
 		catch(Exception e) {
 			e.printStackTrace();
@@ -196,7 +239,12 @@
 			// compare result with input
 			double[][] d1 = DataConverter.convertToDoubleMatrix(ret1);
 			double[][] d2 = DataConverter.convertToDoubleMatrix(ret2);
-			TestUtils.compareMatricesBitAvgDistance(d1, d2, 1, cols, 10000, 500);
+			if(compressionSettings.lossy) {
+				TestUtils.compareMatricesPercentageDistance(d1, d2, 0.35, 0.96, compressionSettings.toString());
+			}
+			else {
+				TestUtils.compareMatricesBitAvgDistance(d1, d2, 10000, 500, compressionSettings.toString());
+			}
 		}
 		catch(Exception e) {
 			e.printStackTrace();
@@ -210,5 +258,4 @@
 		testUnaryOperators(aggType, auop);
 	}
 
-	
 }
diff --git a/src/test/java/org/apache/sysds/test/component/compress/TestBase.java b/src/test/java/org/apache/sysds/test/component/compress/TestBase.java
index 966242c..c6ab8d9 100644
--- a/src/test/java/org/apache/sysds/test/component/compress/TestBase.java
+++ b/src/test/java/org/apache/sysds/test/component/compress/TestBase.java
@@ -53,7 +53,7 @@
 	protected MatrixBlock mb;
 
 	public TestBase(SparsityType sparType, ValueType valType, ValueRange valueRange,
-		CompressionSettings compressionSettings, MatrixTypology MatrixTypology) {
+			CompressionSettings compressionSettings, MatrixTypology MatrixTypology) {
 
 		this.sparsity = TestConstants.getSparsityValue(sparType);
 		this.rows = TestConstants.getNumberOfRows(MatrixTypology);
@@ -63,7 +63,7 @@
 		this.min = TestConstants.getMinRangeValue(valueRange);
 
 		try {
-			switch(valType) {
+			switch (valType) {
 				case CONST:
 					this.min = this.max;
 					// Do not Break, utilize the RAND afterwards.
@@ -74,25 +74,26 @@
 					this.input = TestUtils.round(TestUtils.generateTestMatrix(rows, cols, min, max, sparsity, 7));
 					break;
 				case OLE_COMPRESSIBLE:
-					// Note the Compressible Input generator, generates an already Transposed input normally, therefore last
+					// Note the Compressible Input generator, generates an already Transposed input
+					// normally, therefore last
 					// argument is true, to build a "normal" matrix.
-					this.input = CompressibleInputGenerator
-						.getInputDoubleMatrix(rows, cols, CompressionType.OLE, (max - min) / 10, sparsity, 7, true);
+					this.input = CompressibleInputGenerator.getInputDoubleMatrix(rows, cols, CompressionType.OLE,
+							(max - min) / 10, max, min, sparsity, 7, true);
 					break;
 				case RLE_COMPRESSIBLE:
-					this.input = CompressibleInputGenerator
-						.getInputDoubleMatrix(rows, cols, CompressionType.RLE, (max - min) / 10, sparsity, 7, true);
+					this.input = CompressibleInputGenerator.getInputDoubleMatrix(rows, cols, CompressionType.RLE,
+							(max - min) / 10, max, min, sparsity, 7, true);
 					break;
 				default:
 					throw new NotImplementedException("Not Implemented Test Value type input generator");
 			}
-		
+
 		} catch (Exception e) {
 			e.printStackTrace();
-			assertTrue("Error in construction of input Test Base",false);
-			//TODO: handle exception
+			assertTrue("Error in construction of input Test Base", false);
+			// TODO: handle exception
 		}
-		
+
 		this.valRange = valueRange;
 		this.valType = valType;
 		this.compressionSettings = compressionSettings;
diff --git a/src/test/java/org/apache/sysds/test/component/compress/colgroup/JolEstimateOLETest.java b/src/test/java/org/apache/sysds/test/component/compress/colgroup/JolEstimateOLETest.java
index da2595e..97daf72 100644
--- a/src/test/java/org/apache/sysds/test/component/compress/colgroup/JolEstimateOLETest.java
+++ b/src/test/java/org/apache/sysds/test/component/compress/colgroup/JolEstimateOLETest.java
@@ -40,89 +40,89 @@
 
 		MatrixBlock mb;
 		// base tests
-		mb = DataConverter.convertToMatrixBlock(new double[][] {{1}});
-		tests.add(new Object[] {mb, new int[]{1, 2, 2, 1}, 0});
-		mb = DataConverter.convertToMatrixBlock(new double[][] {{0}});
-		tests.add(new Object[] {mb, new int[]{0, 1, 0, 0}, 0});
-		mb = DataConverter.convertToMatrixBlock(new double[][] {{0, 0, 0, 0, 0}});
-		tests.add(new Object[] {mb, new int[]{0, 1, 0, 0}, 0});
+		mb = DataConverter.convertToMatrixBlock(new double[][] { { 1 } });
+		tests.add(new Object[] { mb, new int[] { 1, 2, 2, 1 }, 0 });
+		mb = DataConverter.convertToMatrixBlock(new double[][] { { 0 } });
+		tests.add(new Object[] { mb, new int[] { 0, 1, 0, 0 }, 0 });
+		mb = DataConverter.convertToMatrixBlock(new double[][] { { 0, 0, 0, 0, 0 } });
+		tests.add(new Object[] { mb, new int[] { 0, 1, 0, 0 }, 0 });
 
 		// The size of the compression increase at repeated values.
-		mb = DataConverter.convertToMatrixBlock(new double[][] {{0, 0, 0, 0, 5, 0}});
-		tests.add(new Object[] {mb, new int[]{1, 2, 2, 1}, 0});
-		mb = DataConverter.convertToMatrixBlock(new double[][] {{0, 0, 0, 0, 5, 5, 0}});
-		tests.add(new Object[] {mb, new int[]{1, 2, 3, 1}, 0});
-		mb = DataConverter.convertToMatrixBlock(new double[][] {{0, 0, 0, 0, 5, 5, 5, 0}});
-		tests.add(new Object[] {mb, new int[]{1, 2, 4, 1}, 0});
-		mb = DataConverter.convertToMatrixBlock(new double[][] {{0, 0, 0, 0, 5, 5, 5, 5, 5, 5}});
-		tests.add(new Object[] {mb, new int[]{1, 2, 7, 1}, 0});
+		mb = DataConverter.convertToMatrixBlock(new double[][] { { 0, 0, 0, 0, 5, 0 } });
+		tests.add(new Object[] { mb, new int[] { 1, 2, 2, 1 }, 0 });
+		mb = DataConverter.convertToMatrixBlock(new double[][] { { 0, 0, 0, 0, 5, 5, 0 } });
+		tests.add(new Object[] { mb, new int[] { 1, 2, 3, 1 }, 0 });
+		mb = DataConverter.convertToMatrixBlock(new double[][] { { 0, 0, 0, 0, 5, 5, 5, 0 } });
+		tests.add(new Object[] { mb, new int[] { 1, 2, 4, 1 }, 0 });
+		mb = DataConverter.convertToMatrixBlock(new double[][] { { 0, 0, 0, 0, 5, 5, 5, 5, 5, 5 } });
+		tests.add(new Object[] { mb, new int[] { 1, 2, 7, 1 }, 0 });
 
 		// all values grow by 1 if new value is introduced
-		mb = DataConverter.convertToMatrixBlock(new double[][] {{0, 0, 0, 0, 5, 7, 0}});
-		tests.add(new Object[] {mb, new int[]{2, 3, 4, 2}, 0});
-		mb = DataConverter.convertToMatrixBlock(new double[][] {{0, 0, 0, 0, 5, 2, 1, 0}});
-		tests.add(new Object[] {mb, new int[]{3, 4, 6, 3}, 0});
-		mb = DataConverter.convertToMatrixBlock(new double[][] {{0, 0, 0, 0, 5, 2, 1, 3, 6, 7}});
-		tests.add(new Object[] {mb, new int[]{6, 7, 12, 6}, 0});
+		mb = DataConverter.convertToMatrixBlock(new double[][] { { 0, 0, 0, 0, 5, 7, 0 } });
+		tests.add(new Object[] { mb, new int[] { 2, 3, 4, 2 }, 0 });
+		mb = DataConverter.convertToMatrixBlock(new double[][] { { 0, 0, 0, 0, 5, 2, 1, 0 } });
+		tests.add(new Object[] { mb, new int[] { 3, 4, 6, 3 }, 0 });
+		mb = DataConverter.convertToMatrixBlock(new double[][] { { 0, 0, 0, 0, 5, 2, 1, 3, 6, 7 } });
+		tests.add(new Object[] { mb, new int[] { 6, 7, 12, 6 }, 0 });
 
 		// Dense random... Horrible compression
 		mb = DataConverter.convertToMatrixBlock(TestUtils.generateTestMatrix(1, 100, 0, 100, 1.0, 7));
-		tests.add(new Object[] {mb, new int[]{100, 100 + 1, 200, 100}, 0});
+		tests.add(new Object[] { mb, new int[] { 100, 100 + 1, 200, 100 }, 0 });
 		mb = DataConverter.convertToMatrixBlock(TestUtils.generateTestMatrix(1, 1000, 0, 100, 1.0, 7));
-		tests.add(new Object[] {mb, new int[]{1000, 1000 + 1, 2000, 1000}, 0});
+		tests.add(new Object[] { mb, new int[] { 1000, 1000 + 1, 2000, 1000 }, 0 });
 		mb = DataConverter.convertToMatrixBlock(TestUtils.generateTestMatrix(1, 10000, 0, 100, 1.0, 7));
-		tests.add(new Object[] {mb, new int[]{10000, 10000 + 1, 20000, 10000}, 0});
+		tests.add(new Object[] { mb, new int[] { 10000, 10000 + 1, 20000, 10000 }, 0 });
 
 		// Random rounded numbers dense
 		mb = DataConverter.convertToMatrixBlock(TestUtils.round(TestUtils.generateTestMatrix(1, 1523, 0, 99, 1.0, 7)));
-		tests.add(new Object[] {mb, new int[]{99, 100, 1616, 99}, 0});
+		tests.add(new Object[] { mb, new int[] { 99, 100, 1616, 99 }, 0 });
 		mb = DataConverter.convertToMatrixBlock(TestUtils.round(TestUtils.generateTestMatrix(1, 4000, 0, 255, 1.0, 7)));
-		tests.add(new Object[] {mb, new int[]{255, 256, 4250, 255}, 0});
+		tests.add(new Object[] { mb, new int[] { 255, 256, 4250, 255 }, 0 });
 
 		// Sparse rounded numbers
 		mb = DataConverter.convertToMatrixBlock(TestUtils.round(TestUtils.generateTestMatrix(1, 1523, 0, 99, 0.1, 7)));
-		tests.add(new Object[] {mb, new int[]{76, 77, 225, 76}, 0});
+		tests.add(new Object[] { mb, new int[] { 76, 77, 225, 76 }, 0 });
 		mb = DataConverter
-			.convertToMatrixBlock(TestUtils.round(TestUtils.generateTestMatrix(1, 1621, 0, 99, 0.1, 142)));
-		tests.add(new Object[] {mb, new int[]{81, 82, 238, 81}, 0});
+				.convertToMatrixBlock(TestUtils.round(TestUtils.generateTestMatrix(1, 1621, 0, 99, 0.1, 142)));
+		tests.add(new Object[] { mb, new int[] { 81, 82, 238, 81 }, 0 });
 		mb = DataConverter
-			.convertToMatrixBlock(TestUtils.round(TestUtils.generateTestMatrix(1, 2321, 0, 99, 0.1, 512)));
-		tests.add(new Object[] {mb, new int[]{92, 93, 332, 92}, 0});
+				.convertToMatrixBlock(TestUtils.round(TestUtils.generateTestMatrix(1, 2321, 0, 99, 0.1, 512)));
+		tests.add(new Object[] { mb, new int[] { 92, 93, 332, 92 }, 0 });
 		mb = DataConverter.convertToMatrixBlock(TestUtils.round(TestUtils.generateTestMatrix(1, 4000, 0, 255, 0.1, 7)));
-		tests.add(new Object[] {mb, new int[]{195, 196, 573, 195}, 0});
+		tests.add(new Object[] { mb, new int[] { 195, 196, 573, 195 }, 0 });
 
 		mb = DataConverter.convertToMatrixBlock(TestUtils.round(TestUtils.generateTestMatrix(1, 1523, 0, 99, 0.5, 7)));
-		tests.add(new Object[] {mb, new int[]{98, 99, 826, 99}, 0});
+		tests.add(new Object[] { mb, new int[] { 98, 99, 826, 99 }, 0 });
 		mb = DataConverter
-			.convertToMatrixBlock(TestUtils.round(TestUtils.generateTestMatrix(1, 1621, 0, 99, 0.5, 142)));
-		tests.add(new Object[] {mb, new int[]{99, 100, 913, 99}, 0});
+				.convertToMatrixBlock(TestUtils.round(TestUtils.generateTestMatrix(1, 1621, 0, 99, 0.5, 142)));
+		tests.add(new Object[] { mb, new int[] { 99, 100, 913, 99 }, 0 });
 		mb = DataConverter
-			.convertToMatrixBlock(TestUtils.round(TestUtils.generateTestMatrix(1, 2321, 0, 99, 0.5, 512)));
-		tests.add(new Object[] {mb, new int[]{99, 100, 1292, 99}, 0});
+				.convertToMatrixBlock(TestUtils.round(TestUtils.generateTestMatrix(1, 2321, 0, 99, 0.5, 512)));
+		tests.add(new Object[] { mb, new int[] { 99, 100, 1292, 99 }, 0 });
 		mb = DataConverter.convertToMatrixBlock(TestUtils.round(TestUtils.generateTestMatrix(1, 4000, 0, 255, 0.5, 7)));
-		tests.add(new Object[] {mb, new int[]{255, 256, 2208, 255}, 0});
+		tests.add(new Object[] { mb, new int[] { 255, 256, 2208, 255 }, 0 });
 
 		// Paper
-		mb = DataConverter
-			.convertToMatrixBlock(new double[][] {{7, 3, 7, 7, 3, 7, 3, 3, 7, 3}, {6, 4, 6, 5, 4, 5, 4, 4, 6, 4}});
-		tests.add(new Object[] {mb, new int[]{6, 4, 13, 3}, 0});
+		mb = DataConverter.convertToMatrixBlock(
+				new double[][] { { 7, 3, 7, 7, 3, 7, 3, 3, 7, 3 }, { 6, 4, 6, 5, 4, 5, 4, 4, 6, 4 } });
+		tests.add(new Object[] { mb, new int[] { 6, 4, 13, 3 }, 0 });
 
 		// Dream Inputs
-		int[] cols = new int[]{2,6,111};
-		int[] rows = new int[]{10,121,513};
-		int[] unique = new int[]{3,5};
-		for(int y : cols){
-			for (int x : rows){
-				for (int u : unique){
+		int[] cols = new int[] { 2, 6, 111 };
+		int[] rows = new int[] { 10, 121, 513 };
+		int[] unique = new int[] { 3, 5 };
+		for (int y : cols) {
+			for (int x : rows) {
+				for (int u : unique) {
 					mb = CompressibleInputGenerator.getInput(x, y, CompressionType.OLE, u, 1.0, 5);
-					tests.add(new Object[] {mb, new int[]{u * y, u + 1, x + u , u}, 0});
+					tests.add(new Object[] { mb, new int[] { u * y, u + 1, x + u, u }, 0 });
 				}
 			}
 		}
 
 		// Sparse test.
 		mb = CompressibleInputGenerator.getInput(571, 1, CompressionType.OLE, 40, 0.6, 5);
-		tests.add(new Object[] {mb, new int[]{40 * 1, 40 + 1, ((571 + 40) / 10 ) * 6 ,40}, 0});
+		tests.add(new Object[] { mb, new int[] { 40 * 1, 40 + 1, ((571 + 40) / 10) * 6, 40 }, 0 });
 
 		return tests;
 	}
diff --git a/src/test/java/org/apache/sysds/test/component/compress/colgroup/JolEstimateRLETest.java b/src/test/java/org/apache/sysds/test/component/compress/colgroup/JolEstimateRLETest.java
index 8d4b7fa..afab30f 100644
--- a/src/test/java/org/apache/sysds/test/component/compress/colgroup/JolEstimateRLETest.java
+++ b/src/test/java/org/apache/sysds/test/component/compress/colgroup/JolEstimateRLETest.java
@@ -141,26 +141,21 @@
 		mb = CompressibleInputGenerator.getInput(1000000, 1, CompressionType.RLE, 1, 1.0, 132);
 		tests.add(new Object[] {mb, 1, 32, 0});
 
-
 		// Multi Column
 		// two identical columns 
 		mb = CompressibleInputGenerator.getInput(10, 2, CompressionType.RLE, 2, 1.0, 132);
 		tests.add(new Object[] {mb, 3, 6, 0});
 
 		mb = CompressibleInputGenerator.getInput(10, 6, CompressionType.RLE, 2, 1.0, 132);
-		// System.out.println(mb);
 		tests.add(new Object[] {mb, 5, 10, 0});
 
 		mb = CompressibleInputGenerator.getInput(10, 100, CompressionType.RLE, 2, 1.0, 132);
-		// System.out.println(mb);
 		tests.add(new Object[] {mb, 10, 20, 0});
 
 		mb = CompressibleInputGenerator.getInput(101, 17, CompressionType.RLE, 2, 1.0, 132);
-		// System.out.println(mb);
 		tests.add(new Object[] {mb, 15, 15*2, 0});
 
 		mb = CompressibleInputGenerator.getInput(101, 17, CompressionType.RLE, 3, 1.0, 132);
-		// System.out.println(mb);
 		tests.add(new Object[] {mb, 31, 62, 0});
 
 		return tests;